Dataset columns (with value statistics from the dataset viewer):
- problem_id: string, length 18–22
- source: string, 1 distinct value
- task_type: string, 1 distinct value
- in_source_id: string, length 13–58
- prompt: string, length 1.71k–18.9k
- golden_diff: string, length 145–5.13k
- verification_info: string, length 465–23.6k
- num_tokens_prompt: int64, range 556–4.1k
- num_tokens_diff: int64, range 47–1.02k
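These columns can be consumed directly with the `datasets` library. The sketch below is a minimal, hypothetical loading example: the Hub ID "rasdani/github-patches" is taken from the `source` column and the "train" split name is an assumption, so substitute the actual dataset ID and split when loading.

```python
# Minimal sketch: load the dataset and inspect a few rows.
# Assumptions: the Hub ID "rasdani/github-patches" (taken from the `source`
# column) and the "train" split are placeholders; replace with real values.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

for row in ds.select(range(3)):
    print(row["problem_id"], row["in_source_id"])
    print(row["prompt"][:200])        # issue statement plus partial code base
    print(row["golden_diff"][:200])   # reference patch in unified diff format
    print(row["num_tokens_prompt"], row["num_tokens_diff"])
```

The example rows that follow show the raw field values for several records in this layout: problem_id, source, task_type, in_source_id, then the full prompt, golden_diff, verification_info, and the two token counts.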
gh_patches_debug_19299
rasdani/github-patches
git_diff
bentoml__BentoML-4685
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> bug: module 'socket' has no attribute 'AF_UNIX' ### Describe the bug Hello, I'm trying to use Bentoml by playing with the quick start examples. When running the Iris classification example on a windows machine, I have this error message: ``` File "C:\Users\Path\lib\site-packages\uvicorn\server.py", line 140, in startup sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM) AttributeError: module 'socket' has no attribute 'AF_UNIX' ``` I tried to change the socket attribute to AF_INET, the error messages disappear but the client cannot connect to the bentoml server. Thanks, ### To reproduce _No response_ ### Expected behavior _No response_ ### Environment bentoml:1.2.12 python:3.9.18 uvicorn:0.29.0 Windows: 11 Pro 22H2 </issue> <code> [start of src/_bentoml_impl/worker/service.py] 1 from __future__ import annotations 2 3 import json 4 import os 5 import typing as t 6 7 import click 8 9 10 @click.command() 11 @click.argument("bento_identifier", type=click.STRING, required=False, default=".") 12 @click.option("--service-name", type=click.STRING, required=False, default="") 13 @click.option( 14 "--fd", 15 type=click.INT, 16 required=True, 17 help="File descriptor of the socket to listen on", 18 ) 19 @click.option( 20 "--runner-map", 21 type=click.STRING, 22 envvar="BENTOML_RUNNER_MAP", 23 help="JSON string of runners map, default sets to envars `BENTOML_RUNNER_MAP`", 24 ) 25 @click.option( 26 "--backlog", type=click.INT, default=2048, help="Backlog size for the socket" 27 ) 28 @click.option( 29 "--prometheus-dir", 30 type=click.Path(exists=True), 31 help="Required by prometheus to pass the metrics in multi-process mode", 32 ) 33 @click.option( 34 "--worker-env", type=click.STRING, default=None, help="Environment variables" 35 ) 36 @click.option( 37 "--worker-id", 38 required=False, 39 type=click.INT, 40 default=None, 41 help="If set, start the server as a bare worker with the given worker ID. 
Otherwise start a standalone server with a supervisor process.", 42 ) 43 @click.option( 44 "--ssl-certfile", 45 type=str, 46 default=None, 47 help="SSL certificate file", 48 ) 49 @click.option( 50 "--ssl-keyfile", 51 type=str, 52 default=None, 53 help="SSL key file", 54 ) 55 @click.option( 56 "--ssl-keyfile-password", 57 type=str, 58 default=None, 59 help="SSL keyfile password", 60 ) 61 @click.option( 62 "--ssl-version", 63 type=int, 64 default=None, 65 help="SSL version to use (see stdlib 'ssl' module)", 66 ) 67 @click.option( 68 "--ssl-cert-reqs", 69 type=int, 70 default=None, 71 help="Whether client certificate is required (see stdlib 'ssl' module)", 72 ) 73 @click.option( 74 "--ssl-ca-certs", 75 type=str, 76 default=None, 77 help="CA certificates file", 78 ) 79 @click.option( 80 "--ssl-ciphers", 81 type=str, 82 default=None, 83 help="Ciphers to use (see stdlib 'ssl' module)", 84 ) 85 @click.option( 86 "--development-mode", 87 type=click.BOOL, 88 help="Run the API server in development mode", 89 is_flag=True, 90 default=False, 91 show_default=True, 92 ) 93 @click.option( 94 "--timeout", 95 type=click.INT, 96 help="Specify the timeout for API server", 97 ) 98 def main( 99 bento_identifier: str, 100 service_name: str, 101 fd: int, 102 runner_map: str | None, 103 backlog: int, 104 worker_env: str | None, 105 worker_id: int | None, 106 prometheus_dir: str | None, 107 ssl_certfile: str | None, 108 ssl_keyfile: str | None, 109 ssl_keyfile_password: str | None, 110 ssl_version: int | None, 111 ssl_cert_reqs: int | None, 112 ssl_ca_certs: str | None, 113 ssl_ciphers: str | None, 114 development_mode: bool, 115 timeout: int, 116 ): 117 """ 118 Start a HTTP server worker for given service. 119 """ 120 import psutil 121 import uvicorn 122 123 if worker_env: 124 env_list: list[dict[str, t.Any]] = json.loads(worker_env) 125 if worker_id is not None: 126 # worker id from circus starts from 1 127 worker_key = worker_id - 1 128 if worker_key >= len(env_list): 129 raise IndexError( 130 f"Worker ID {worker_id} is out of range, " 131 f"the maximum worker ID is {len(env_list)}" 132 ) 133 os.environ.update(env_list[worker_key]) 134 135 from _bentoml_impl.loader import import_service 136 from bentoml._internal.container import BentoMLContainer 137 from bentoml._internal.context import server_context 138 from bentoml._internal.log import configure_server_logging 139 140 if runner_map: 141 BentoMLContainer.remote_runner_mapping.set( 142 t.cast(t.Dict[str, str], json.loads(runner_map)) 143 ) 144 145 service = import_service(bento_identifier) 146 147 if service_name and service_name != service.name: 148 service = service.find_dependent(service_name) 149 server_context.service_type = "service" 150 else: 151 server_context.service_type = "entry_service" 152 153 if worker_id is not None: 154 server_context.worker_index = worker_id 155 156 configure_server_logging() 157 BentoMLContainer.development_mode.set(development_mode) 158 159 if prometheus_dir is not None: 160 BentoMLContainer.prometheus_multiproc_dir.set(prometheus_dir) 161 server_context.service_name = service.name 162 163 asgi_app = service.to_asgi( 164 is_main=server_context.service_type == "entry_service", init=False 165 ) 166 167 uvicorn_extra_options: dict[str, t.Any] = {} 168 if ssl_version is not None: 169 uvicorn_extra_options["ssl_version"] = ssl_version 170 if ssl_cert_reqs is not None: 171 uvicorn_extra_options["ssl_cert_reqs"] = ssl_cert_reqs 172 if ssl_ciphers is not None: 173 uvicorn_extra_options["ssl_ciphers"] = ssl_ciphers 174 175 if 
psutil.WINDOWS: 176 # 1. uvloop is not supported on Windows 177 # 2. the default policy for Python > 3.8 on Windows is ProactorEventLoop, which doesn't 178 # support listen on a existing socket file descriptors 179 # See https://docs.python.org/3.8/library/asyncio-platforms.html#windows 180 uvicorn_extra_options["loop"] = "asyncio" 181 import asyncio 182 183 asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) # type: ignore 184 185 uvicorn.run( 186 app=asgi_app, 187 fd=fd, 188 backlog=backlog, 189 log_config=None, 190 workers=1, 191 ssl_certfile=ssl_certfile, 192 ssl_keyfile=ssl_keyfile, 193 ssl_keyfile_password=ssl_keyfile_password, 194 ssl_ca_certs=ssl_ca_certs, 195 server_header=False, 196 **uvicorn_extra_options, 197 ) 198 199 200 if __name__ == "__main__": 201 main() # pylint: disable=no-value-for-parameter 202 [end of src/_bentoml_impl/worker/service.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/_bentoml_impl/worker/service.py b/src/_bentoml_impl/worker/service.py --- a/src/_bentoml_impl/worker/service.py +++ b/src/_bentoml_impl/worker/service.py @@ -117,6 +117,8 @@ """ Start a HTTP server worker for given service. """ + import socket + import psutil import uvicorn @@ -182,9 +184,8 @@ asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) # type: ignore - uvicorn.run( + config = uvicorn.Config( app=asgi_app, - fd=fd, backlog=backlog, log_config=None, workers=1, @@ -195,6 +196,8 @@ server_header=False, **uvicorn_extra_options, ) + socket = socket.socket(fileno=fd) + uvicorn.Server(config).run(sockets=[socket]) if __name__ == "__main__":
{"golden_diff": "diff --git a/src/_bentoml_impl/worker/service.py b/src/_bentoml_impl/worker/service.py\n--- a/src/_bentoml_impl/worker/service.py\n+++ b/src/_bentoml_impl/worker/service.py\n@@ -117,6 +117,8 @@\n \"\"\"\n Start a HTTP server worker for given service.\n \"\"\"\n+ import socket\n+\n import psutil\n import uvicorn\n \n@@ -182,9 +184,8 @@\n \n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) # type: ignore\n \n- uvicorn.run(\n+ config = uvicorn.Config(\n app=asgi_app,\n- fd=fd,\n backlog=backlog,\n log_config=None,\n workers=1,\n@@ -195,6 +196,8 @@\n server_header=False,\n **uvicorn_extra_options,\n )\n+ socket = socket.socket(fileno=fd)\n+ uvicorn.Server(config).run(sockets=[socket])\n \n \n if __name__ == \"__main__\":\n", "issue": "bug: module 'socket' has no attribute 'AF_UNIX'\n### Describe the bug\r\n\r\nHello,\r\nI'm trying to use Bentoml by playing with the quick start examples. When running the Iris classification example on a windows machine, I have this error message:\r\n```\r\nFile \"C:\\Users\\Path\\lib\\site-packages\\uvicorn\\server.py\", line 140, in startup\r\n sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM)\r\nAttributeError: module 'socket' has no attribute 'AF_UNIX'\r\n```\r\n\r\nI tried to change the socket attribute to AF_INET, the error messages disappear but the client cannot connect to the bentoml server.\r\n\r\nThanks,\r\n\r\n\r\n### To reproduce\r\n\r\n_No response_\r\n\r\n### Expected behavior\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\nbentoml:1.2.12\r\npython:3.9.18\r\nuvicorn:0.29.0\r\nWindows: 11 Pro 22H2\n", "before_files": [{"content": "from __future__ import annotations\n\nimport json\nimport os\nimport typing as t\n\nimport click\n\n\n@click.command()\n@click.argument(\"bento_identifier\", type=click.STRING, required=False, default=\".\")\n@click.option(\"--service-name\", type=click.STRING, required=False, default=\"\")\n@click.option(\n \"--fd\",\n type=click.INT,\n required=True,\n help=\"File descriptor of the socket to listen on\",\n)\n@click.option(\n \"--runner-map\",\n type=click.STRING,\n envvar=\"BENTOML_RUNNER_MAP\",\n help=\"JSON string of runners map, default sets to envars `BENTOML_RUNNER_MAP`\",\n)\n@click.option(\n \"--backlog\", type=click.INT, default=2048, help=\"Backlog size for the socket\"\n)\n@click.option(\n \"--prometheus-dir\",\n type=click.Path(exists=True),\n help=\"Required by prometheus to pass the metrics in multi-process mode\",\n)\n@click.option(\n \"--worker-env\", type=click.STRING, default=None, help=\"Environment variables\"\n)\n@click.option(\n \"--worker-id\",\n required=False,\n type=click.INT,\n default=None,\n help=\"If set, start the server as a bare worker with the given worker ID. 
Otherwise start a standalone server with a supervisor process.\",\n)\n@click.option(\n \"--ssl-certfile\",\n type=str,\n default=None,\n help=\"SSL certificate file\",\n)\n@click.option(\n \"--ssl-keyfile\",\n type=str,\n default=None,\n help=\"SSL key file\",\n)\n@click.option(\n \"--ssl-keyfile-password\",\n type=str,\n default=None,\n help=\"SSL keyfile password\",\n)\n@click.option(\n \"--ssl-version\",\n type=int,\n default=None,\n help=\"SSL version to use (see stdlib 'ssl' module)\",\n)\n@click.option(\n \"--ssl-cert-reqs\",\n type=int,\n default=None,\n help=\"Whether client certificate is required (see stdlib 'ssl' module)\",\n)\n@click.option(\n \"--ssl-ca-certs\",\n type=str,\n default=None,\n help=\"CA certificates file\",\n)\n@click.option(\n \"--ssl-ciphers\",\n type=str,\n default=None,\n help=\"Ciphers to use (see stdlib 'ssl' module)\",\n)\n@click.option(\n \"--development-mode\",\n type=click.BOOL,\n help=\"Run the API server in development mode\",\n is_flag=True,\n default=False,\n show_default=True,\n)\n@click.option(\n \"--timeout\",\n type=click.INT,\n help=\"Specify the timeout for API server\",\n)\ndef main(\n bento_identifier: str,\n service_name: str,\n fd: int,\n runner_map: str | None,\n backlog: int,\n worker_env: str | None,\n worker_id: int | None,\n prometheus_dir: str | None,\n ssl_certfile: str | None,\n ssl_keyfile: str | None,\n ssl_keyfile_password: str | None,\n ssl_version: int | None,\n ssl_cert_reqs: int | None,\n ssl_ca_certs: str | None,\n ssl_ciphers: str | None,\n development_mode: bool,\n timeout: int,\n):\n \"\"\"\n Start a HTTP server worker for given service.\n \"\"\"\n import psutil\n import uvicorn\n\n if worker_env:\n env_list: list[dict[str, t.Any]] = json.loads(worker_env)\n if worker_id is not None:\n # worker id from circus starts from 1\n worker_key = worker_id - 1\n if worker_key >= len(env_list):\n raise IndexError(\n f\"Worker ID {worker_id} is out of range, \"\n f\"the maximum worker ID is {len(env_list)}\"\n )\n os.environ.update(env_list[worker_key])\n\n from _bentoml_impl.loader import import_service\n from bentoml._internal.container import BentoMLContainer\n from bentoml._internal.context import server_context\n from bentoml._internal.log import configure_server_logging\n\n if runner_map:\n BentoMLContainer.remote_runner_mapping.set(\n t.cast(t.Dict[str, str], json.loads(runner_map))\n )\n\n service = import_service(bento_identifier)\n\n if service_name and service_name != service.name:\n service = service.find_dependent(service_name)\n server_context.service_type = \"service\"\n else:\n server_context.service_type = \"entry_service\"\n\n if worker_id is not None:\n server_context.worker_index = worker_id\n\n configure_server_logging()\n BentoMLContainer.development_mode.set(development_mode)\n\n if prometheus_dir is not None:\n BentoMLContainer.prometheus_multiproc_dir.set(prometheus_dir)\n server_context.service_name = service.name\n\n asgi_app = service.to_asgi(\n is_main=server_context.service_type == \"entry_service\", init=False\n )\n\n uvicorn_extra_options: dict[str, t.Any] = {}\n if ssl_version is not None:\n uvicorn_extra_options[\"ssl_version\"] = ssl_version\n if ssl_cert_reqs is not None:\n uvicorn_extra_options[\"ssl_cert_reqs\"] = ssl_cert_reqs\n if ssl_ciphers is not None:\n uvicorn_extra_options[\"ssl_ciphers\"] = ssl_ciphers\n\n if psutil.WINDOWS:\n # 1. uvloop is not supported on Windows\n # 2. 
the default policy for Python > 3.8 on Windows is ProactorEventLoop, which doesn't\n # support listen on a existing socket file descriptors\n # See https://docs.python.org/3.8/library/asyncio-platforms.html#windows\n uvicorn_extra_options[\"loop\"] = \"asyncio\"\n import asyncio\n\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) # type: ignore\n\n uvicorn.run(\n app=asgi_app,\n fd=fd,\n backlog=backlog,\n log_config=None,\n workers=1,\n ssl_certfile=ssl_certfile,\n ssl_keyfile=ssl_keyfile,\n ssl_keyfile_password=ssl_keyfile_password,\n ssl_ca_certs=ssl_ca_certs,\n server_header=False,\n **uvicorn_extra_options,\n )\n\n\nif __name__ == \"__main__\":\n main() # pylint: disable=no-value-for-parameter\n", "path": "src/_bentoml_impl/worker/service.py"}]}
2,647
238
gh_patches_debug_18185
rasdani/github-patches
git_diff
mozilla__bugbug-214
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use the bug snapshot transform in the "uplift" model Depends on #5. </issue> <code> [start of bugbug/models/uplift.py] 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 5 6 import xgboost 7 from imblearn.under_sampling import RandomUnderSampler 8 from sklearn.compose import ColumnTransformer 9 from sklearn.feature_extraction import DictVectorizer 10 from sklearn.pipeline import Pipeline 11 12 from bugbug import bug_features 13 from bugbug import bugzilla 14 from bugbug.model import Model 15 16 17 class UpliftModel(Model): 18 def __init__(self, lemmatization=False): 19 Model.__init__(self, lemmatization) 20 21 self.sampler = RandomUnderSampler(random_state=0) 22 23 feature_extractors = [ 24 bug_features.has_str(), 25 bug_features.has_regression_range(), 26 bug_features.severity(), 27 bug_features.keywords(), 28 bug_features.is_coverity_issue(), 29 bug_features.has_crash_signature(), 30 bug_features.has_url(), 31 bug_features.has_w3c_url(), 32 bug_features.has_github_url(), 33 bug_features.whiteboard(), 34 bug_features.patches(), 35 bug_features.landings(), 36 bug_features.title(), 37 ] 38 39 cleanup_functions = [ 40 bug_features.cleanup_fileref, 41 bug_features.cleanup_url, 42 bug_features.cleanup_synonyms, 43 ] 44 45 self.extraction_pipeline = Pipeline([ 46 ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions)), 47 ('union', ColumnTransformer([ 48 ('data', DictVectorizer(), 'data'), 49 50 ('title', self.text_vectorizer(), 'title'), 51 52 ('comments', self.text_vectorizer(), 'comments'), 53 ])), 54 ]) 55 56 self.clf = xgboost.XGBClassifier(n_jobs=16) 57 self.clf.set_params(predictor='cpu_predictor') 58 59 def get_labels(self): 60 classes = {} 61 62 for bug_data in bugzilla.get_bugs(): 63 bug_id = int(bug_data['id']) 64 65 for attachment in bug_data['attachments']: 66 for flag in attachment['flags']: 67 if not flag['name'].startswith('approval-mozilla-') or flag['status'] not in ['+', '-']: 68 continue 69 70 if flag['status'] == '+': 71 classes[bug_id] = 1 72 elif flag['status'] == '-': 73 classes[bug_id] = 0 74 75 return classes 76 77 def get_feature_names(self): 78 return self.extraction_pipeline.named_steps['union'].get_feature_names() 79 [end of bugbug/models/uplift.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bugbug/models/uplift.py b/bugbug/models/uplift.py --- a/bugbug/models/uplift.py +++ b/bugbug/models/uplift.py @@ -43,7 +43,7 @@ ] self.extraction_pipeline = Pipeline([ - ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions)), + ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions, rollback=True, rollback_when=self.rollback)), ('union', ColumnTransformer([ ('data', DictVectorizer(), 'data'), @@ -56,6 +56,9 @@ self.clf = xgboost.XGBClassifier(n_jobs=16) self.clf.set_params(predictor='cpu_predictor') + def rollback(self, change): + return (change['field_name'] == 'flagtypes.name' and change['added'].startswith('approval-mozilla-') and (change['added'].endswith('+') or change['added'].endswith('-'))) + def get_labels(self): classes = {}
{"golden_diff": "diff --git a/bugbug/models/uplift.py b/bugbug/models/uplift.py\n--- a/bugbug/models/uplift.py\n+++ b/bugbug/models/uplift.py\n@@ -43,7 +43,7 @@\n ]\n \n self.extraction_pipeline = Pipeline([\n- ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions)),\n+ ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions, rollback=True, rollback_when=self.rollback)),\n ('union', ColumnTransformer([\n ('data', DictVectorizer(), 'data'),\n \n@@ -56,6 +56,9 @@\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor='cpu_predictor')\n \n+ def rollback(self, change):\n+ return (change['field_name'] == 'flagtypes.name' and change['added'].startswith('approval-mozilla-') and (change['added'].endswith('+') or change['added'].endswith('-')))\n+\n def get_labels(self):\n classes = {}\n", "issue": "Use the bug snapshot transform in the \"uplift\" model\nDepends on #5.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features\nfrom bugbug import bugzilla\nfrom bugbug.model import Model\n\n\nclass UpliftModel(Model):\n def __init__(self, lemmatization=False):\n Model.__init__(self, lemmatization)\n\n self.sampler = RandomUnderSampler(random_state=0)\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.keywords(),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n bug_features.title(),\n ]\n\n cleanup_functions = [\n bug_features.cleanup_fileref,\n bug_features.cleanup_url,\n bug_features.cleanup_synonyms,\n ]\n\n self.extraction_pipeline = Pipeline([\n ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions)),\n ('union', ColumnTransformer([\n ('data', DictVectorizer(), 'data'),\n\n ('title', self.text_vectorizer(), 'title'),\n\n ('comments', self.text_vectorizer(), 'comments'),\n ])),\n ])\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor='cpu_predictor')\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs():\n bug_id = int(bug_data['id'])\n\n for attachment in bug_data['attachments']:\n for flag in attachment['flags']:\n if not flag['name'].startswith('approval-mozilla-') or flag['status'] not in ['+', '-']:\n continue\n\n if flag['status'] == '+':\n classes[bug_id] = 1\n elif flag['status'] == '-':\n classes[bug_id] = 0\n\n return classes\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps['union'].get_feature_names()\n", "path": "bugbug/models/uplift.py"}]}
1,249
231
gh_patches_debug_35982
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-260
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> gRPC server instrumentation creates multiple traces on streaming requests **Environment** Current `master` code, basically the sample code in the documentation, testing with a unary request vs. a streaming request. **Steps to reproduce** Create a simple gRPC servicer with two RPCs, one which returns a single message (the unary response), and one which yields items in a list for a streaming response. The key here is to make an instrumented request within the primary request handler (I'm using a simple HTTP get with the Requests instrumentation), so you get an _additional_ span which should be attached to the same trace. **What is the expected behavior?** A single trace with the main span, and a second child span for the HTTP request. **What is the actual behavior?** Two separate traces, each containing a single span. **Additional context** The problem _only_ occurs on streaming requests - I'm sure the reworking I did as part of https://github.com/open-telemetry/opentelemetry-python/pull/1171 is where the problem started, I didn't take into account the streaming case specifically with multiple spans, and naturally, there are no tests for anything streaming, only unary responses. So as part of this, we'll need some useful tests as well. I'll see if I can write up my test case as an actual test case. And again, I've got a vested interest in this working, so I'll have a PR up soon. </issue> <code> [start of instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 # pylint:disable=relative-beyond-top-level 16 # pylint:disable=arguments-differ 17 # pylint:disable=no-member 18 # pylint:disable=signature-differs 19 20 """ 21 Implementation of the service-side open-telemetry interceptor. 
22 """ 23 24 import logging 25 from contextlib import contextmanager 26 27 import grpc 28 29 from opentelemetry import propagators, trace 30 from opentelemetry.context import attach, detach 31 from opentelemetry.trace.propagation.textmap import DictGetter 32 from opentelemetry.trace.status import Status, StatusCode 33 34 logger = logging.getLogger(__name__) 35 36 37 # wrap an RPC call 38 # see https://github.com/grpc/grpc/issues/18191 39 def _wrap_rpc_behavior(handler, continuation): 40 if handler is None: 41 return None 42 43 if handler.request_streaming and handler.response_streaming: 44 behavior_fn = handler.stream_stream 45 handler_factory = grpc.stream_stream_rpc_method_handler 46 elif handler.request_streaming and not handler.response_streaming: 47 behavior_fn = handler.stream_unary 48 handler_factory = grpc.stream_unary_rpc_method_handler 49 elif not handler.request_streaming and handler.response_streaming: 50 behavior_fn = handler.unary_stream 51 handler_factory = grpc.unary_stream_rpc_method_handler 52 else: 53 behavior_fn = handler.unary_unary 54 handler_factory = grpc.unary_unary_rpc_method_handler 55 56 return handler_factory( 57 continuation( 58 behavior_fn, handler.request_streaming, handler.response_streaming 59 ), 60 request_deserializer=handler.request_deserializer, 61 response_serializer=handler.response_serializer, 62 ) 63 64 65 # pylint:disable=abstract-method 66 class _OpenTelemetryServicerContext(grpc.ServicerContext): 67 def __init__(self, servicer_context, active_span): 68 self._servicer_context = servicer_context 69 self._active_span = active_span 70 self.code = grpc.StatusCode.OK 71 self.details = None 72 super().__init__() 73 74 def is_active(self, *args, **kwargs): 75 return self._servicer_context.is_active(*args, **kwargs) 76 77 def time_remaining(self, *args, **kwargs): 78 return self._servicer_context.time_remaining(*args, **kwargs) 79 80 def cancel(self, *args, **kwargs): 81 return self._servicer_context.cancel(*args, **kwargs) 82 83 def add_callback(self, *args, **kwargs): 84 return self._servicer_context.add_callback(*args, **kwargs) 85 86 def disable_next_message_compression(self): 87 return self._service_context.disable_next_message_compression() 88 89 def invocation_metadata(self, *args, **kwargs): 90 return self._servicer_context.invocation_metadata(*args, **kwargs) 91 92 def peer(self): 93 return self._servicer_context.peer() 94 95 def peer_identities(self): 96 return self._servicer_context.peer_identities() 97 98 def peer_identity_key(self): 99 return self._servicer_context.peer_identity_key() 100 101 def auth_context(self): 102 return self._servicer_context.auth_context() 103 104 def set_compression(self, compression): 105 return self._servicer_context.set_compression(compression) 106 107 def send_initial_metadata(self, *args, **kwargs): 108 return self._servicer_context.send_initial_metadata(*args, **kwargs) 109 110 def set_trailing_metadata(self, *args, **kwargs): 111 return self._servicer_context.set_trailing_metadata(*args, **kwargs) 112 113 def abort(self, code, details): 114 self.code = code 115 self.details = details 116 self._active_span.set_attribute("rpc.grpc.status_code", code.value[0]) 117 self._active_span.set_status( 118 Status( 119 status_code=StatusCode.ERROR, 120 description="{}:{}".format(code, details), 121 ) 122 ) 123 return self._servicer_context.abort(code, details) 124 125 def abort_with_status(self, status): 126 return self._servicer_context.abort_with_status(status) 127 128 def set_code(self, code): 129 self.code = code 130 
# use details if we already have it, otherwise the status description 131 details = self.details or code.value[1] 132 self._active_span.set_attribute("rpc.grpc.status_code", code.value[0]) 133 if code != grpc.StatusCode.OK: 134 self._active_span.set_status( 135 Status( 136 status_code=StatusCode.ERROR, 137 description="{}:{}".format(code, details), 138 ) 139 ) 140 return self._servicer_context.set_code(code) 141 142 def set_details(self, details): 143 self.details = details 144 if self.code != grpc.StatusCode.OK: 145 self._active_span.set_status( 146 Status( 147 status_code=StatusCode.ERROR, 148 description="{}:{}".format(self.code, details), 149 ) 150 ) 151 return self._servicer_context.set_details(details) 152 153 154 # pylint:disable=abstract-method 155 # pylint:disable=no-self-use 156 # pylint:disable=unused-argument 157 class OpenTelemetryServerInterceptor(grpc.ServerInterceptor): 158 """ 159 A gRPC server interceptor, to add OpenTelemetry. 160 161 Usage:: 162 163 tracer = some OpenTelemetry tracer 164 165 interceptors = [ 166 OpenTelemetryServerInterceptor(tracer), 167 ] 168 169 server = grpc.server( 170 futures.ThreadPoolExecutor(max_workers=concurrency), 171 interceptors = interceptors) 172 173 """ 174 175 def __init__(self, tracer): 176 self._tracer = tracer 177 self._carrier_getter = DictGetter() 178 179 @contextmanager 180 def _set_remote_context(self, servicer_context): 181 metadata = servicer_context.invocation_metadata() 182 if metadata: 183 md_dict = {md.key: md.value for md in metadata} 184 ctx = propagators.extract(self._carrier_getter, md_dict) 185 token = attach(ctx) 186 try: 187 yield 188 finally: 189 detach(token) 190 else: 191 yield 192 193 def _start_span(self, handler_call_details, context): 194 195 # standard attributes 196 attributes = { 197 "rpc.system": "grpc", 198 "rpc.grpc.status_code": grpc.StatusCode.OK.value[0], 199 } 200 201 # if we have details about the call, split into service and method 202 if handler_call_details.method: 203 service, method = handler_call_details.method.lstrip("/").split( 204 "/", 1 205 ) 206 attributes.update({"rpc.method": method, "rpc.service": service}) 207 208 # add some attributes from the metadata 209 metadata = dict(context.invocation_metadata()) 210 if "user-agent" in metadata: 211 attributes["rpc.user_agent"] = metadata["user-agent"] 212 213 # Split up the peer to keep with how other telemetry sources 214 # do it. 
This looks like: 215 # * ipv6:[::1]:57284 216 # * ipv4:127.0.0.1:57284 217 # * ipv4:10.2.1.1:57284,127.0.0.1:57284 218 # 219 try: 220 ip, port = ( 221 context.peer().split(",")[0].split(":", 1)[1].rsplit(":", 1) 222 ) 223 attributes.update({"net.peer.ip": ip, "net.peer.port": port}) 224 225 # other telemetry sources add this, so we will too 226 if ip in ("[::1]", "127.0.0.1"): 227 attributes["net.peer.name"] = "localhost" 228 229 except IndexError: 230 logger.warning("Failed to parse peer address '%s'", context.peer()) 231 232 return self._tracer.start_as_current_span( 233 name=handler_call_details.method, 234 kind=trace.SpanKind.SERVER, 235 attributes=attributes, 236 ) 237 238 def intercept_service(self, continuation, handler_call_details): 239 def telemetry_wrapper(behavior, request_streaming, response_streaming): 240 def telemetry_interceptor(request_or_iterator, context): 241 242 with self._set_remote_context(context): 243 with self._start_span( 244 handler_call_details, context 245 ) as span: 246 # wrap the context 247 context = _OpenTelemetryServicerContext(context, span) 248 249 # And now we run the actual RPC. 250 try: 251 return behavior(request_or_iterator, context) 252 except Exception as error: 253 # Bare exceptions are likely to be gRPC aborts, which 254 # we handle in our context wrapper. 255 # Here, we're interested in uncaught exceptions. 256 # pylint:disable=unidiomatic-typecheck 257 if type(error) != Exception: 258 span.record_exception(error) 259 raise error 260 261 return telemetry_interceptor 262 263 return _wrap_rpc_behavior( 264 continuation(handler_call_details), telemetry_wrapper 265 ) 266 [end of instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py --- a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py +++ b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py @@ -239,6 +239,15 @@ def telemetry_wrapper(behavior, request_streaming, response_streaming): def telemetry_interceptor(request_or_iterator, context): + # handle streaming responses specially + if response_streaming: + return self._intercept_server_stream( + behavior, + handler_call_details, + request_or_iterator, + context, + ) + with self._set_remote_context(context): with self._start_span( handler_call_details, context @@ -249,6 +258,7 @@ # And now we run the actual RPC. try: return behavior(request_or_iterator, context) + except Exception as error: # Bare exceptions are likely to be gRPC aborts, which # we handle in our context wrapper. @@ -263,3 +273,23 @@ return _wrap_rpc_behavior( continuation(handler_call_details), telemetry_wrapper ) + + # Handle streaming responses separately - we have to do this + # to return a *new* generator or various upstream things + # get confused, or we'll lose the consistent trace + def _intercept_server_stream( + self, behavior, handler_call_details, request_or_iterator, context + ): + + with self._set_remote_context(context): + with self._start_span(handler_call_details, context) as span: + context = _OpenTelemetryServicerContext(context, span) + + try: + yield from behavior(request_or_iterator, context) + + except Exception as error: + # pylint:disable=unidiomatic-typecheck + if type(error) != Exception: + span.record_exception(error) + raise error
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py\n--- a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py\n+++ b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py\n@@ -239,6 +239,15 @@\n def telemetry_wrapper(behavior, request_streaming, response_streaming):\n def telemetry_interceptor(request_or_iterator, context):\n \n+ # handle streaming responses specially\n+ if response_streaming:\n+ return self._intercept_server_stream(\n+ behavior,\n+ handler_call_details,\n+ request_or_iterator,\n+ context,\n+ )\n+\n with self._set_remote_context(context):\n with self._start_span(\n handler_call_details, context\n@@ -249,6 +258,7 @@\n # And now we run the actual RPC.\n try:\n return behavior(request_or_iterator, context)\n+\n except Exception as error:\n # Bare exceptions are likely to be gRPC aborts, which\n # we handle in our context wrapper.\n@@ -263,3 +273,23 @@\n return _wrap_rpc_behavior(\n continuation(handler_call_details), telemetry_wrapper\n )\n+\n+ # Handle streaming responses separately - we have to do this\n+ # to return a *new* generator or various upstream things\n+ # get confused, or we'll lose the consistent trace\n+ def _intercept_server_stream(\n+ self, behavior, handler_call_details, request_or_iterator, context\n+ ):\n+\n+ with self._set_remote_context(context):\n+ with self._start_span(handler_call_details, context) as span:\n+ context = _OpenTelemetryServicerContext(context, span)\n+\n+ try:\n+ yield from behavior(request_or_iterator, context)\n+\n+ except Exception as error:\n+ # pylint:disable=unidiomatic-typecheck\n+ if type(error) != Exception:\n+ span.record_exception(error)\n+ raise error\n", "issue": "gRPC server instrumentation creates multiple traces on streaming requests\n**Environment**\r\nCurrent `master` code, basically the sample code in the documentation, testing with a unary request vs. a streaming request.\r\n\r\n**Steps to reproduce**\r\nCreate a simple gRPC servicer with two RPCs, one which returns a single message (the unary response), and one which yields items in a list for a streaming response.\r\n\r\nThe key here is to make an instrumented request within the primary request handler (I'm using a simple HTTP get with the Requests instrumentation), so you get an _additional_ span which should be attached to the same trace.\r\n\r\n**What is the expected behavior?**\r\nA single trace with the main span, and a second child span for the HTTP request.\r\n\r\n**What is the actual behavior?**\r\nTwo separate traces, each containing a single span.\r\n\r\n**Additional context**\r\nThe problem _only_ occurs on streaming requests - I'm sure the reworking I did as part of https://github.com/open-telemetry/opentelemetry-python/pull/1171 is where the problem started, I didn't take into account the streaming case specifically with multiple spans, and naturally, there are no tests for anything streaming, only unary responses.\r\n\r\nSo as part of this, we'll need some useful tests as well. 
I'll see if I can write up my test case as an actual test case.\r\n\r\nAnd again, I've got a vested interest in this working, so I'll have a PR up soon.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint:disable=relative-beyond-top-level\n# pylint:disable=arguments-differ\n# pylint:disable=no-member\n# pylint:disable=signature-differs\n\n\"\"\"\nImplementation of the service-side open-telemetry interceptor.\n\"\"\"\n\nimport logging\nfrom contextlib import contextmanager\n\nimport grpc\n\nfrom opentelemetry import propagators, trace\nfrom opentelemetry.context import attach, detach\nfrom opentelemetry.trace.propagation.textmap import DictGetter\nfrom opentelemetry.trace.status import Status, StatusCode\n\nlogger = logging.getLogger(__name__)\n\n\n# wrap an RPC call\n# see https://github.com/grpc/grpc/issues/18191\ndef _wrap_rpc_behavior(handler, continuation):\n if handler is None:\n return None\n\n if handler.request_streaming and handler.response_streaming:\n behavior_fn = handler.stream_stream\n handler_factory = grpc.stream_stream_rpc_method_handler\n elif handler.request_streaming and not handler.response_streaming:\n behavior_fn = handler.stream_unary\n handler_factory = grpc.stream_unary_rpc_method_handler\n elif not handler.request_streaming and handler.response_streaming:\n behavior_fn = handler.unary_stream\n handler_factory = grpc.unary_stream_rpc_method_handler\n else:\n behavior_fn = handler.unary_unary\n handler_factory = grpc.unary_unary_rpc_method_handler\n\n return handler_factory(\n continuation(\n behavior_fn, handler.request_streaming, handler.response_streaming\n ),\n request_deserializer=handler.request_deserializer,\n response_serializer=handler.response_serializer,\n )\n\n\n# pylint:disable=abstract-method\nclass _OpenTelemetryServicerContext(grpc.ServicerContext):\n def __init__(self, servicer_context, active_span):\n self._servicer_context = servicer_context\n self._active_span = active_span\n self.code = grpc.StatusCode.OK\n self.details = None\n super().__init__()\n\n def is_active(self, *args, **kwargs):\n return self._servicer_context.is_active(*args, **kwargs)\n\n def time_remaining(self, *args, **kwargs):\n return self._servicer_context.time_remaining(*args, **kwargs)\n\n def cancel(self, *args, **kwargs):\n return self._servicer_context.cancel(*args, **kwargs)\n\n def add_callback(self, *args, **kwargs):\n return self._servicer_context.add_callback(*args, **kwargs)\n\n def disable_next_message_compression(self):\n return self._service_context.disable_next_message_compression()\n\n def invocation_metadata(self, *args, **kwargs):\n return self._servicer_context.invocation_metadata(*args, **kwargs)\n\n def peer(self):\n return self._servicer_context.peer()\n\n def peer_identities(self):\n return self._servicer_context.peer_identities()\n\n def peer_identity_key(self):\n return self._servicer_context.peer_identity_key()\n\n def auth_context(self):\n return 
self._servicer_context.auth_context()\n\n def set_compression(self, compression):\n return self._servicer_context.set_compression(compression)\n\n def send_initial_metadata(self, *args, **kwargs):\n return self._servicer_context.send_initial_metadata(*args, **kwargs)\n\n def set_trailing_metadata(self, *args, **kwargs):\n return self._servicer_context.set_trailing_metadata(*args, **kwargs)\n\n def abort(self, code, details):\n self.code = code\n self.details = details\n self._active_span.set_attribute(\"rpc.grpc.status_code\", code.value[0])\n self._active_span.set_status(\n Status(\n status_code=StatusCode.ERROR,\n description=\"{}:{}\".format(code, details),\n )\n )\n return self._servicer_context.abort(code, details)\n\n def abort_with_status(self, status):\n return self._servicer_context.abort_with_status(status)\n\n def set_code(self, code):\n self.code = code\n # use details if we already have it, otherwise the status description\n details = self.details or code.value[1]\n self._active_span.set_attribute(\"rpc.grpc.status_code\", code.value[0])\n if code != grpc.StatusCode.OK:\n self._active_span.set_status(\n Status(\n status_code=StatusCode.ERROR,\n description=\"{}:{}\".format(code, details),\n )\n )\n return self._servicer_context.set_code(code)\n\n def set_details(self, details):\n self.details = details\n if self.code != grpc.StatusCode.OK:\n self._active_span.set_status(\n Status(\n status_code=StatusCode.ERROR,\n description=\"{}:{}\".format(self.code, details),\n )\n )\n return self._servicer_context.set_details(details)\n\n\n# pylint:disable=abstract-method\n# pylint:disable=no-self-use\n# pylint:disable=unused-argument\nclass OpenTelemetryServerInterceptor(grpc.ServerInterceptor):\n \"\"\"\n A gRPC server interceptor, to add OpenTelemetry.\n\n Usage::\n\n tracer = some OpenTelemetry tracer\n\n interceptors = [\n OpenTelemetryServerInterceptor(tracer),\n ]\n\n server = grpc.server(\n futures.ThreadPoolExecutor(max_workers=concurrency),\n interceptors = interceptors)\n\n \"\"\"\n\n def __init__(self, tracer):\n self._tracer = tracer\n self._carrier_getter = DictGetter()\n\n @contextmanager\n def _set_remote_context(self, servicer_context):\n metadata = servicer_context.invocation_metadata()\n if metadata:\n md_dict = {md.key: md.value for md in metadata}\n ctx = propagators.extract(self._carrier_getter, md_dict)\n token = attach(ctx)\n try:\n yield\n finally:\n detach(token)\n else:\n yield\n\n def _start_span(self, handler_call_details, context):\n\n # standard attributes\n attributes = {\n \"rpc.system\": \"grpc\",\n \"rpc.grpc.status_code\": grpc.StatusCode.OK.value[0],\n }\n\n # if we have details about the call, split into service and method\n if handler_call_details.method:\n service, method = handler_call_details.method.lstrip(\"/\").split(\n \"/\", 1\n )\n attributes.update({\"rpc.method\": method, \"rpc.service\": service})\n\n # add some attributes from the metadata\n metadata = dict(context.invocation_metadata())\n if \"user-agent\" in metadata:\n attributes[\"rpc.user_agent\"] = metadata[\"user-agent\"]\n\n # Split up the peer to keep with how other telemetry sources\n # do it. 
This looks like:\n # * ipv6:[::1]:57284\n # * ipv4:127.0.0.1:57284\n # * ipv4:10.2.1.1:57284,127.0.0.1:57284\n #\n try:\n ip, port = (\n context.peer().split(\",\")[0].split(\":\", 1)[1].rsplit(\":\", 1)\n )\n attributes.update({\"net.peer.ip\": ip, \"net.peer.port\": port})\n\n # other telemetry sources add this, so we will too\n if ip in (\"[::1]\", \"127.0.0.1\"):\n attributes[\"net.peer.name\"] = \"localhost\"\n\n except IndexError:\n logger.warning(\"Failed to parse peer address '%s'\", context.peer())\n\n return self._tracer.start_as_current_span(\n name=handler_call_details.method,\n kind=trace.SpanKind.SERVER,\n attributes=attributes,\n )\n\n def intercept_service(self, continuation, handler_call_details):\n def telemetry_wrapper(behavior, request_streaming, response_streaming):\n def telemetry_interceptor(request_or_iterator, context):\n\n with self._set_remote_context(context):\n with self._start_span(\n handler_call_details, context\n ) as span:\n # wrap the context\n context = _OpenTelemetryServicerContext(context, span)\n\n # And now we run the actual RPC.\n try:\n return behavior(request_or_iterator, context)\n except Exception as error:\n # Bare exceptions are likely to be gRPC aborts, which\n # we handle in our context wrapper.\n # Here, we're interested in uncaught exceptions.\n # pylint:disable=unidiomatic-typecheck\n if type(error) != Exception:\n span.record_exception(error)\n raise error\n\n return telemetry_interceptor\n\n return _wrap_rpc_behavior(\n continuation(handler_call_details), telemetry_wrapper\n )\n", "path": "instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py"}]}
3,527
485
gh_patches_debug_349
rasdani/github-patches
git_diff
google__turbinia-1070
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Missing sys module import in logger.py Logger module is missing an import statement for 'sys' </issue> <code> [start of turbinia/config/logger.py] 1 # -*- coding: utf-8 -*- 2 # Copyright 2017 Google Inc. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 """Sets up logging.""" 16 17 from __future__ import unicode_literals 18 import logging 19 20 import warnings 21 import logging.handlers 22 import os 23 24 from turbinia import config 25 from turbinia import TurbiniaException 26 27 # Environment variable to look for node name in 28 ENVNODENAME = 'NODE_NAME' 29 30 31 def setup(need_file_handler=True, need_stream_handler=True, log_file_path=None): 32 """Set up logging parameters. 33 34 This will also set the root logger, which is the default logger when a named 35 logger is not specified. We currently use 'turbinia' as the named logger, 36 however some external modules that are called by Turbinia can use the root 37 logger, so we want to be able to optionally configure that as well. 38 """ 39 # Remove known warning about credentials 40 warnings.filterwarnings( 41 'ignore', 'Your application has authenticated using end user credentials') 42 43 logger = logging.getLogger('turbinia') 44 # Eliminate double logging from root logger 45 logger.propagate = False 46 47 # We only need a handler if one of that type doesn't exist already 48 if logger.handlers: 49 for handler in logger.handlers: 50 # Want to do strict type-checking here because is instance will include 51 # subclasses and so won't distinguish between StreamHandlers and 52 # FileHandlers. 
53 # pylint: disable=unidiomatic-typecheck 54 if type(handler) == logging.FileHandler: 55 need_file_handler = False 56 57 # pylint: disable=unidiomatic-typecheck 58 if type(handler) == logging.StreamHandler: 59 need_stream_handler = False 60 61 if need_file_handler: 62 try: 63 config.LoadConfig() 64 except TurbiniaException as exception: 65 print( 66 'Could not load config file ({0!s}).\n{1:s}'.format( 67 exception, config.CONFIG_MSG)) 68 sys.exit(1) 69 70 # Check if a user specified log path was provided else create default path 71 if not log_file_path: 72 log_name = os.uname().nodename 73 # Check if NODE_NAME available for GKE setups 74 if ENVNODENAME in os.environ: 75 log_name = log_name + '.{0!s}'.format(os.environ[ENVNODENAME]) 76 log_file_path = os.path.join(config.LOG_DIR, log_name) + '.log' 77 78 file_handler = logging.FileHandler(log_file_path) 79 formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s') 80 file_handler.setFormatter(formatter) 81 file_handler.setLevel(logging.DEBUG) 82 logger.addHandler(file_handler) 83 84 console_handler = logging.StreamHandler() 85 formatter = logging.Formatter( 86 '%(asctime)s [%(levelname)s] %(message)s', "%Y-%m-%d %H:%M:%S") 87 console_handler.setFormatter(formatter) 88 if need_stream_handler: 89 logger.addHandler(console_handler) 90 91 # Configure the root logger to use exactly our handlers because other modules 92 # like PSQ use this, and we want to see log messages from it when executing 93 # from CLI. 94 root_log = logging.getLogger() 95 for handler in root_log.handlers: 96 root_log.removeHandler(handler) 97 root_log.addHandler(console_handler) 98 if need_file_handler: 99 root_log.addHandler(file_handler) 100 101 # Set filelock logging to ERROR due to log spam 102 logging.getLogger("filelock").setLevel(logging.ERROR) 103 [end of turbinia/config/logger.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/turbinia/config/logger.py b/turbinia/config/logger.py --- a/turbinia/config/logger.py +++ b/turbinia/config/logger.py @@ -20,6 +20,7 @@ import warnings import logging.handlers import os +import sys from turbinia import config from turbinia import TurbiniaException
{"golden_diff": "diff --git a/turbinia/config/logger.py b/turbinia/config/logger.py\n--- a/turbinia/config/logger.py\n+++ b/turbinia/config/logger.py\n@@ -20,6 +20,7 @@\n import warnings\n import logging.handlers\n import os\n+import sys\n \n from turbinia import config\n from turbinia import TurbiniaException\n", "issue": "Missing sys module import in logger.py\nLogger module is missing an import statement for 'sys'\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Sets up logging.\"\"\"\n\nfrom __future__ import unicode_literals\nimport logging\n\nimport warnings\nimport logging.handlers\nimport os\n\nfrom turbinia import config\nfrom turbinia import TurbiniaException\n\n# Environment variable to look for node name in\nENVNODENAME = 'NODE_NAME'\n\n\ndef setup(need_file_handler=True, need_stream_handler=True, log_file_path=None):\n \"\"\"Set up logging parameters.\n\n This will also set the root logger, which is the default logger when a named\n logger is not specified. We currently use 'turbinia' as the named logger,\n however some external modules that are called by Turbinia can use the root\n logger, so we want to be able to optionally configure that as well.\n \"\"\"\n # Remove known warning about credentials\n warnings.filterwarnings(\n 'ignore', 'Your application has authenticated using end user credentials')\n\n logger = logging.getLogger('turbinia')\n # Eliminate double logging from root logger\n logger.propagate = False\n\n # We only need a handler if one of that type doesn't exist already\n if logger.handlers:\n for handler in logger.handlers:\n # Want to do strict type-checking here because is instance will include\n # subclasses and so won't distinguish between StreamHandlers and\n # FileHandlers.\n # pylint: disable=unidiomatic-typecheck\n if type(handler) == logging.FileHandler:\n need_file_handler = False\n\n # pylint: disable=unidiomatic-typecheck\n if type(handler) == logging.StreamHandler:\n need_stream_handler = False\n\n if need_file_handler:\n try:\n config.LoadConfig()\n except TurbiniaException as exception:\n print(\n 'Could not load config file ({0!s}).\\n{1:s}'.format(\n exception, config.CONFIG_MSG))\n sys.exit(1)\n\n # Check if a user specified log path was provided else create default path\n if not log_file_path:\n log_name = os.uname().nodename\n # Check if NODE_NAME available for GKE setups\n if ENVNODENAME in os.environ:\n log_name = log_name + '.{0!s}'.format(os.environ[ENVNODENAME])\n log_file_path = os.path.join(config.LOG_DIR, log_name) + '.log'\n\n file_handler = logging.FileHandler(log_file_path)\n formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')\n file_handler.setFormatter(formatter)\n file_handler.setLevel(logging.DEBUG)\n logger.addHandler(file_handler)\n\n console_handler = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s [%(levelname)s] %(message)s', \"%Y-%m-%d %H:%M:%S\")\n 
console_handler.setFormatter(formatter)\n if need_stream_handler:\n logger.addHandler(console_handler)\n\n # Configure the root logger to use exactly our handlers because other modules\n # like PSQ use this, and we want to see log messages from it when executing\n # from CLI.\n root_log = logging.getLogger()\n for handler in root_log.handlers:\n root_log.removeHandler(handler)\n root_log.addHandler(console_handler)\n if need_file_handler:\n root_log.addHandler(file_handler)\n\n # Set filelock logging to ERROR due to log spam\n logging.getLogger(\"filelock\").setLevel(logging.ERROR)\n", "path": "turbinia/config/logger.py"}]}
1,618
83
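Editor's note on the row above: the turbinia fix is a one-line `import sys`, but the bug is easy to miss because it only triggers on the error path — `sys.exit(1)` is reached only when `config.LoadConfig()` raises. Below is a minimal, self-contained sketch of that failure mode; the function signature is condensed and `RuntimeError` stands in for `TurbiniaException`, so treat the names as illustrative rather than the project's real API.

```python
import sys  # the missing import added by the golden diff above


def setup(load_config):
    """Condensed sketch of the error path in turbinia/config/logger.py."""
    try:
        load_config()
    except RuntimeError as exception:  # stands in for TurbiniaException
        print('Could not load config file ({0!s}).'.format(exception))
        # Without a module-level `import sys`, this line raises
        # NameError instead of exiting with status 1.
        sys.exit(1)


def broken_loader():
    raise RuntimeError('no config found')


if __name__ == '__main__':
    setup(broken_loader)  # exits 1 cleanly once sys is imported
```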
gh_patches_debug_26789
rasdani/github-patches
git_diff
cloud-custodian__cloud-custodian-5796
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> aws.elasticsearch Error Scanning More Than 5 domains **Describe the bug** When running any elasticsearch policy on an account region with more than 5 elasticsearch domains the policy now bombs out with the error - ```error:An error occurred (ValidationException) when calling the DescribeElasticsearchDomains operation: Please provide a maximum of 5 Elasticsearch domain names to describe.``` **To Reproduce** Create 6 es domains and run an elasticsearch c7n policy, error will occur **Expected behavior** It should chunk the calls into domains of 5 or less **Background (please complete the following information):** - OS: Ubuntu v20 - Python Version: 3.8 - Custodian Version: 0.9.2.0 - Tool Version: [if applicable] - Cloud Provider: aws - Policy: any policy which queries ES - Traceback: ``` [ERROR] 2020-05-22T14:51:25.978Z 9ef7929b-b494-434e-9f9f-dfdfdfdfdfdfd Error while executing policy Traceback (most recent call last): File "/var/task/c7n/policy.py", line 291, in run resources = self.policy.resource_manager.resources() File "/var/task/c7n/query.py", line 466, in resources resources = self.augment(resources) File "/var/task/c7n/query.py", line 521, in augment return self.source.augment(resources) File "/var/task/c7n/resources/elasticsearch.py", line 48, in augment return _augment(domains) File "/var/task/c7n/resources/elasticsearch.py", line 39, in _augment resources = self.manager.retry( File "/var/task/c7n/utils.py", line 373, in _retry return func(*args, **kw) File "/var/task/botocore/client.py", line 316, in _api_call return self._make_api_call(operation_name, kwargs) File "/var/task/botocore/client.py", line 635, in _make_api_call raise error_class(parsed_response, operation_name) botocore.errorfactory.ValidationException: An error occurred (ValidationException) when calling the DescribeElasticsearchDomains operation: Please provide a maximum of 5 Elasticsearch domain names to describe. ``` - `custodian version --debug` output **Additional context** Seems to be introduced with 0.9.2.0 </issue> <code> [start of c7n/resources/elasticsearch.py] 1 # Copyright 2016-2017 Capital One Services, LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 import jmespath 15 16 from c7n.actions import Action, ModifyVpcSecurityGroupsAction 17 from c7n.filters import MetricsFilter 18 from c7n.filters.vpc import SecurityGroupFilter, SubnetFilter, VpcFilter 19 from c7n.manager import resources 20 from c7n.query import ConfigSource, DescribeSource, QueryResourceManager, TypeInfo 21 from c7n.utils import local_session, type_schema 22 from c7n.tags import Tag, RemoveTag, TagActionFilter, TagDelayedAction 23 24 from .securityhub import PostFinding 25 26 27 class DescribeDomain(DescribeSource): 28 29 def get_resources(self, resource_ids): 30 client = local_session(self.manager.session_factory).client('es') 31 return client.describe_elasticsearch_domains( 32 DomainNames=resource_ids)['DomainStatusList'] 33 34 def augment(self, domains): 35 client = local_session(self.manager.session_factory).client('es') 36 model = self.manager.get_model() 37 38 def _augment(resource_set): 39 resources = self.manager.retry( 40 client.describe_elasticsearch_domains, 41 DomainNames=resource_set)['DomainStatusList'] 42 for r in resources: 43 rarn = self.manager.generate_arn(r[model.id]) 44 r['Tags'] = self.manager.retry( 45 client.list_tags, ARN=rarn).get('TagList', []) 46 return resources 47 48 return _augment(domains) 49 50 51 @resources.register('elasticsearch') 52 class ElasticSearchDomain(QueryResourceManager): 53 54 class resource_type(TypeInfo): 55 service = 'es' 56 arn = 'ARN' 57 arn_type = 'domain' 58 enum_spec = ( 59 'list_domain_names', 'DomainNames[].DomainName', None) 60 id = 'DomainName' 61 name = 'Name' 62 dimension = "DomainName" 63 cfn_type = config_type = 'AWS::Elasticsearch::Domain' 64 65 source_mapping = { 66 'describe': DescribeDomain, 67 'config': ConfigSource 68 } 69 70 71 ElasticSearchDomain.filter_registry.register('marked-for-op', TagActionFilter) 72 73 74 @ElasticSearchDomain.filter_registry.register('subnet') 75 class Subnet(SubnetFilter): 76 77 RelatedIdsExpression = "VPCOptions.SubnetIds[]" 78 79 80 @ElasticSearchDomain.filter_registry.register('security-group') 81 class SecurityGroup(SecurityGroupFilter): 82 83 RelatedIdsExpression = "VPCOptions.SecurityGroupIds[]" 84 85 86 @ElasticSearchDomain.filter_registry.register('vpc') 87 class Vpc(VpcFilter): 88 89 RelatedIdsExpression = "VPCOptions.VPCId" 90 91 92 @ElasticSearchDomain.filter_registry.register('metrics') 93 class Metrics(MetricsFilter): 94 95 def get_dimensions(self, resource): 96 return [{'Name': 'ClientId', 97 'Value': self.manager.account_id}, 98 {'Name': 'DomainName', 99 'Value': resource['DomainName']}] 100 101 102 @ElasticSearchDomain.action_registry.register('post-finding') 103 class ElasticSearchPostFinding(PostFinding): 104 105 resource_type = 'AwsElasticsearchDomain' 106 107 def format_resource(self, r): 108 envelope, payload = self.format_envelope(r) 109 payload.update(self.filter_empty({ 110 'AccessPolicies': r.get('AccessPolicies'), 111 'DomainId': r['DomainId'], 112 'DomainName': r['DomainName'], 113 'Endpoint': r.get('Endpoint'), 114 'Endpoints': r.get('Endpoints'), 115 'DomainEndpointOptions': self.filter_empty({ 116 'EnforceHTTPS': jmespath.search( 117 'DomainEndpointOptions.EnforceHTTPS', r), 118 'TLSSecurityPolicy': jmespath.search( 119 'DomainEndpointOptions.TLSSecurityPolicy', r) 120 }), 121 'ElasticsearchVersion': r['ElasticsearchVersion'], 122 'EncryptionAtRestOptions': self.filter_empty({ 123 'Enabled': jmespath.search( 124 'EncryptionAtRestOptions.Enabled', r), 125 'KmsKeyId': jmespath.search( 126 'EncryptionAtRestOptions.KmsKeyId', r) 127 }), 128 
'NodeToNodeEncryptionOptions': self.filter_empty({ 129 'Enabled': jmespath.search( 130 'NodeToNodeEncryptionOptions.Enabled', r) 131 }), 132 'VPCOptions': self.filter_empty({ 133 'AvailabilityZones': jmespath.search( 134 'VPCOptions.AvailabilityZones', r), 135 'SecurityGroupIds': jmespath.search( 136 'VPCOptions.SecurityGroupIds', r), 137 'SubnetIds': jmespath.search('VPCOptions.SubnetIds', r), 138 'VPCId': jmespath.search('VPCOptions.VPCId', r) 139 }) 140 })) 141 return envelope 142 143 144 @ElasticSearchDomain.action_registry.register('modify-security-groups') 145 class ElasticSearchModifySG(ModifyVpcSecurityGroupsAction): 146 """Modify security groups on an Elasticsearch domain""" 147 148 permissions = ('es:UpdateElasticsearchDomainConfig',) 149 150 def process(self, domains): 151 groups = super(ElasticSearchModifySG, self).get_groups(domains) 152 client = local_session(self.manager.session_factory).client('es') 153 154 for dx, d in enumerate(domains): 155 client.update_elasticsearch_domain_config( 156 DomainName=d['DomainName'], 157 VPCOptions={ 158 'SecurityGroupIds': groups[dx]}) 159 160 161 @ElasticSearchDomain.action_registry.register('delete') 162 class Delete(Action): 163 164 schema = type_schema('delete') 165 permissions = ('es:DeleteElasticsearchDomain',) 166 167 def process(self, resources): 168 client = local_session(self.manager.session_factory).client('es') 169 for r in resources: 170 client.delete_elasticsearch_domain(DomainName=r['DomainName']) 171 172 173 @ElasticSearchDomain.action_registry.register('tag') 174 class ElasticSearchAddTag(Tag): 175 """Action to create tag(s) on an existing elasticsearch domain 176 177 :example: 178 179 .. code-block:: yaml 180 181 policies: 182 - name: es-add-tag 183 resource: elasticsearch 184 filters: 185 - "tag:DesiredTag": absent 186 actions: 187 - type: tag 188 key: DesiredTag 189 value: DesiredValue 190 """ 191 permissions = ('es:AddTags',) 192 193 def process_resource_set(self, client, domains, tags): 194 for d in domains: 195 try: 196 client.add_tags(ARN=d['ARN'], TagList=tags) 197 except client.exceptions.ResourceNotFoundExecption: 198 continue 199 200 201 @ElasticSearchDomain.action_registry.register('remove-tag') 202 class ElasticSearchRemoveTag(RemoveTag): 203 """Removes tag(s) on an existing elasticsearch domain 204 205 :example: 206 207 .. code-block:: yaml 208 209 policies: 210 - name: es-remove-tag 211 resource: elasticsearch 212 filters: 213 - "tag:ExpiredTag": present 214 actions: 215 - type: remove-tag 216 tags: ['ExpiredTag'] 217 """ 218 permissions = ('es:RemoveTags',) 219 220 def process_resource_set(self, client, domains, tags): 221 for d in domains: 222 try: 223 client.remove_tags(ARN=d['ARN'], TagKeys=tags) 224 except client.exceptions.ResourceNotFoundExecption: 225 continue 226 227 228 @ElasticSearchDomain.action_registry.register('mark-for-op') 229 class ElasticSearchMarkForOp(TagDelayedAction): 230 """Tag an elasticsearch domain for action later 231 232 :example: 233 234 .. code-block:: yaml 235 236 policies: 237 - name: es-delete-missing 238 resource: elasticsearch 239 filters: 240 - "tag:DesiredTag": absent 241 actions: 242 - type: mark-for-op 243 days: 7 244 op: delete 245 tag: c7n_es_delete 246 """ 247 [end of c7n/resources/elasticsearch.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/c7n/resources/elasticsearch.py b/c7n/resources/elasticsearch.py --- a/c7n/resources/elasticsearch.py +++ b/c7n/resources/elasticsearch.py @@ -18,7 +18,7 @@ from c7n.filters.vpc import SecurityGroupFilter, SubnetFilter, VpcFilter from c7n.manager import resources from c7n.query import ConfigSource, DescribeSource, QueryResourceManager, TypeInfo -from c7n.utils import local_session, type_schema +from c7n.utils import chunks, local_session, type_schema from c7n.tags import Tag, RemoveTag, TagActionFilter, TagDelayedAction from .securityhub import PostFinding @@ -34,6 +34,7 @@ def augment(self, domains): client = local_session(self.manager.session_factory).client('es') model = self.manager.get_model() + results = [] def _augment(resource_set): resources = self.manager.retry( @@ -45,7 +46,10 @@ client.list_tags, ARN=rarn).get('TagList', []) return resources - return _augment(domains) + for resource_set in chunks(domains, 5): + results.extend(_augment(resource_set)) + + return results @resources.register('elasticsearch')
{"golden_diff": "diff --git a/c7n/resources/elasticsearch.py b/c7n/resources/elasticsearch.py\n--- a/c7n/resources/elasticsearch.py\n+++ b/c7n/resources/elasticsearch.py\n@@ -18,7 +18,7 @@\n from c7n.filters.vpc import SecurityGroupFilter, SubnetFilter, VpcFilter\n from c7n.manager import resources\n from c7n.query import ConfigSource, DescribeSource, QueryResourceManager, TypeInfo\n-from c7n.utils import local_session, type_schema\n+from c7n.utils import chunks, local_session, type_schema\n from c7n.tags import Tag, RemoveTag, TagActionFilter, TagDelayedAction\n \n from .securityhub import PostFinding\n@@ -34,6 +34,7 @@\n def augment(self, domains):\n client = local_session(self.manager.session_factory).client('es')\n model = self.manager.get_model()\n+ results = []\n \n def _augment(resource_set):\n resources = self.manager.retry(\n@@ -45,7 +46,10 @@\n client.list_tags, ARN=rarn).get('TagList', [])\n return resources\n \n- return _augment(domains)\n+ for resource_set in chunks(domains, 5):\n+ results.extend(_augment(resource_set))\n+\n+ return results\n \n \n @resources.register('elasticsearch')\n", "issue": "aws.elasticsearch Error Scanning More Than 5 domains\n**Describe the bug**\r\nWhen running any elasticsearch policy on an account region with more than 5 elasticsearch domains the policy now bombs out with the error - ```error:An error occurred (ValidationException) when calling the DescribeElasticsearchDomains operation: Please provide a maximum of 5 Elasticsearch domain names to describe.```\r\n\r\n**To Reproduce**\r\nCreate 6 es domains and run an elasticsearch c7n policy, error will occur\r\n\r\n**Expected behavior**\r\nIt should chunk the calls into domains of 5 or less\r\n\r\n\r\n**Background (please complete the following information):**\r\n - OS: Ubuntu v20\r\n - Python Version: 3.8\r\n - Custodian Version: 0.9.2.0\r\n - Tool Version: [if applicable]\r\n - Cloud Provider: aws\r\n - Policy: any policy which queries ES\r\n - Traceback: \r\n```\r\n[ERROR]\t2020-05-22T14:51:25.978Z\t9ef7929b-b494-434e-9f9f-dfdfdfdfdfdfd Error while executing policy\r\nTraceback (most recent call last):\r\n File \"/var/task/c7n/policy.py\", line 291, in run\r\n resources = self.policy.resource_manager.resources()\r\n File \"/var/task/c7n/query.py\", line 466, in resources\r\n resources = self.augment(resources)\r\n File \"/var/task/c7n/query.py\", line 521, in augment\r\n return self.source.augment(resources)\r\n File \"/var/task/c7n/resources/elasticsearch.py\", line 48, in augment\r\n return _augment(domains)\r\n File \"/var/task/c7n/resources/elasticsearch.py\", line 39, in _augment\r\n resources = self.manager.retry(\r\n File \"/var/task/c7n/utils.py\", line 373, in _retry\r\n return func(*args, **kw)\r\n File \"/var/task/botocore/client.py\", line 316, in _api_call\r\n return self._make_api_call(operation_name, kwargs)\r\n File \"/var/task/botocore/client.py\", line 635, in _make_api_call\r\n raise error_class(parsed_response, operation_name)\r\nbotocore.errorfactory.ValidationException: An error occurred (ValidationException) when calling the DescribeElasticsearchDomains operation: Please provide a maximum of 5 Elasticsearch domain names to describe.\r\n```\r\n - `custodian version --debug` output\r\n\r\n**Additional context**\r\nSeems to be introduced with 0.9.2.0\r\n\n", "before_files": [{"content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport jmespath\n\nfrom c7n.actions import Action, ModifyVpcSecurityGroupsAction\nfrom c7n.filters import MetricsFilter\nfrom c7n.filters.vpc import SecurityGroupFilter, SubnetFilter, VpcFilter\nfrom c7n.manager import resources\nfrom c7n.query import ConfigSource, DescribeSource, QueryResourceManager, TypeInfo\nfrom c7n.utils import local_session, type_schema\nfrom c7n.tags import Tag, RemoveTag, TagActionFilter, TagDelayedAction\n\nfrom .securityhub import PostFinding\n\n\nclass DescribeDomain(DescribeSource):\n\n def get_resources(self, resource_ids):\n client = local_session(self.manager.session_factory).client('es')\n return client.describe_elasticsearch_domains(\n DomainNames=resource_ids)['DomainStatusList']\n\n def augment(self, domains):\n client = local_session(self.manager.session_factory).client('es')\n model = self.manager.get_model()\n\n def _augment(resource_set):\n resources = self.manager.retry(\n client.describe_elasticsearch_domains,\n DomainNames=resource_set)['DomainStatusList']\n for r in resources:\n rarn = self.manager.generate_arn(r[model.id])\n r['Tags'] = self.manager.retry(\n client.list_tags, ARN=rarn).get('TagList', [])\n return resources\n\n return _augment(domains)\n\n\n@resources.register('elasticsearch')\nclass ElasticSearchDomain(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'es'\n arn = 'ARN'\n arn_type = 'domain'\n enum_spec = (\n 'list_domain_names', 'DomainNames[].DomainName', None)\n id = 'DomainName'\n name = 'Name'\n dimension = \"DomainName\"\n cfn_type = config_type = 'AWS::Elasticsearch::Domain'\n\n source_mapping = {\n 'describe': DescribeDomain,\n 'config': ConfigSource\n }\n\n\nElasticSearchDomain.filter_registry.register('marked-for-op', TagActionFilter)\n\n\n@ElasticSearchDomain.filter_registry.register('subnet')\nclass Subnet(SubnetFilter):\n\n RelatedIdsExpression = \"VPCOptions.SubnetIds[]\"\n\n\n@ElasticSearchDomain.filter_registry.register('security-group')\nclass SecurityGroup(SecurityGroupFilter):\n\n RelatedIdsExpression = \"VPCOptions.SecurityGroupIds[]\"\n\n\n@ElasticSearchDomain.filter_registry.register('vpc')\nclass Vpc(VpcFilter):\n\n RelatedIdsExpression = \"VPCOptions.VPCId\"\n\n\n@ElasticSearchDomain.filter_registry.register('metrics')\nclass Metrics(MetricsFilter):\n\n def get_dimensions(self, resource):\n return [{'Name': 'ClientId',\n 'Value': self.manager.account_id},\n {'Name': 'DomainName',\n 'Value': resource['DomainName']}]\n\n\n@ElasticSearchDomain.action_registry.register('post-finding')\nclass ElasticSearchPostFinding(PostFinding):\n\n resource_type = 'AwsElasticsearchDomain'\n\n def format_resource(self, r):\n envelope, payload = self.format_envelope(r)\n payload.update(self.filter_empty({\n 'AccessPolicies': r.get('AccessPolicies'),\n 'DomainId': r['DomainId'],\n 'DomainName': r['DomainName'],\n 'Endpoint': r.get('Endpoint'),\n 'Endpoints': r.get('Endpoints'),\n 'DomainEndpointOptions': self.filter_empty({\n 'EnforceHTTPS': jmespath.search(\n 'DomainEndpointOptions.EnforceHTTPS', r),\n 'TLSSecurityPolicy': jmespath.search(\n 'DomainEndpointOptions.TLSSecurityPolicy', r)\n }),\n 
'ElasticsearchVersion': r['ElasticsearchVersion'],\n 'EncryptionAtRestOptions': self.filter_empty({\n 'Enabled': jmespath.search(\n 'EncryptionAtRestOptions.Enabled', r),\n 'KmsKeyId': jmespath.search(\n 'EncryptionAtRestOptions.KmsKeyId', r)\n }),\n 'NodeToNodeEncryptionOptions': self.filter_empty({\n 'Enabled': jmespath.search(\n 'NodeToNodeEncryptionOptions.Enabled', r)\n }),\n 'VPCOptions': self.filter_empty({\n 'AvailabilityZones': jmespath.search(\n 'VPCOptions.AvailabilityZones', r),\n 'SecurityGroupIds': jmespath.search(\n 'VPCOptions.SecurityGroupIds', r),\n 'SubnetIds': jmespath.search('VPCOptions.SubnetIds', r),\n 'VPCId': jmespath.search('VPCOptions.VPCId', r)\n })\n }))\n return envelope\n\n\n@ElasticSearchDomain.action_registry.register('modify-security-groups')\nclass ElasticSearchModifySG(ModifyVpcSecurityGroupsAction):\n \"\"\"Modify security groups on an Elasticsearch domain\"\"\"\n\n permissions = ('es:UpdateElasticsearchDomainConfig',)\n\n def process(self, domains):\n groups = super(ElasticSearchModifySG, self).get_groups(domains)\n client = local_session(self.manager.session_factory).client('es')\n\n for dx, d in enumerate(domains):\n client.update_elasticsearch_domain_config(\n DomainName=d['DomainName'],\n VPCOptions={\n 'SecurityGroupIds': groups[dx]})\n\n\n@ElasticSearchDomain.action_registry.register('delete')\nclass Delete(Action):\n\n schema = type_schema('delete')\n permissions = ('es:DeleteElasticsearchDomain',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('es')\n for r in resources:\n client.delete_elasticsearch_domain(DomainName=r['DomainName'])\n\n\n@ElasticSearchDomain.action_registry.register('tag')\nclass ElasticSearchAddTag(Tag):\n \"\"\"Action to create tag(s) on an existing elasticsearch domain\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: es-add-tag\n resource: elasticsearch\n filters:\n - \"tag:DesiredTag\": absent\n actions:\n - type: tag\n key: DesiredTag\n value: DesiredValue\n \"\"\"\n permissions = ('es:AddTags',)\n\n def process_resource_set(self, client, domains, tags):\n for d in domains:\n try:\n client.add_tags(ARN=d['ARN'], TagList=tags)\n except client.exceptions.ResourceNotFoundExecption:\n continue\n\n\n@ElasticSearchDomain.action_registry.register('remove-tag')\nclass ElasticSearchRemoveTag(RemoveTag):\n \"\"\"Removes tag(s) on an existing elasticsearch domain\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: es-remove-tag\n resource: elasticsearch\n filters:\n - \"tag:ExpiredTag\": present\n actions:\n - type: remove-tag\n tags: ['ExpiredTag']\n \"\"\"\n permissions = ('es:RemoveTags',)\n\n def process_resource_set(self, client, domains, tags):\n for d in domains:\n try:\n client.remove_tags(ARN=d['ARN'], TagKeys=tags)\n except client.exceptions.ResourceNotFoundExecption:\n continue\n\n\n@ElasticSearchDomain.action_registry.register('mark-for-op')\nclass ElasticSearchMarkForOp(TagDelayedAction):\n \"\"\"Tag an elasticsearch domain for action later\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: es-delete-missing\n resource: elasticsearch\n filters:\n - \"tag:DesiredTag\": absent\n actions:\n - type: mark-for-op\n days: 7\n op: delete\n tag: c7n_es_delete\n \"\"\"\n", "path": "c7n/resources/elasticsearch.py"}]}
3,512
292
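Editor's note on the row above: the core of the elasticsearch fix is batching — `DescribeElasticsearchDomains` accepts at most five domain names per call, so the augment step has to walk the domain list in chunks and merge the results. Here is a minimal sketch of that pattern using a plain boto3 client; the golden diff itself reuses `c7n.utils.chunks` and the resource manager's retry wrapper rather than this standalone helper.

```python
import boto3


def chunks(items, size):
    """Yield successive slices of at most `size` elements."""
    for i in range(0, len(items), size):
        yield items[i:i + size]


def describe_all_domains(domain_names, region='us-east-1'):
    client = boto3.client('es', region_name=region)
    statuses = []
    for batch in chunks(domain_names, 5):
        # Each call stays within the 5-name limit of
        # DescribeElasticsearchDomains, avoiding the ValidationException.
        resp = client.describe_elasticsearch_domains(DomainNames=batch)
        statuses.extend(resp['DomainStatusList'])
    return statuses
```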
gh_patches_debug_29382
rasdani/github-patches
git_diff
uclapi__uclapi-568
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove opbeat Opbeat is no longer a thing, so we should remove it and replace it with something else? Maybe double down on Sentry and integrate that more tightly https://github.com/uclapi/uclapi/blob/81b90305f9316b020664b32c2436e27ab957e8a7/backend/uclapi/requirements.txt#L33 </issue> <code> [start of backend/uclapi/uclapi/celery.py] 1 from __future__ import absolute_import, unicode_literals 2 3 import celery 4 import os 5 import raven 6 7 from django.conf import settings 8 from raven.contrib.celery import register_signal as raven_register_signal, \ 9 register_logger_signal as raven_register_logger_signal 10 11 from common.helpers import read_dotenv 12 13 read_dotenv(os.path.join(os.path.dirname(os.path.dirname(__file__)), '.env')) 14 15 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'uclapi.settings') 16 17 18 class Celery(celery.Celery): 19 def on_configure(self): 20 if os.environ.get("SENTRY_DSN") is not None: 21 client = raven.Client(os.environ.get("SENTRY_DSN")) 22 raven_register_logger_signal(client) 23 raven_register_signal(client) 24 25 26 app = Celery('uclapi') 27 28 app.config_from_object('django.conf.settings', namespace='CELERY') 29 30 31 from opbeat.contrib.django.models import \ 32 register_handlers as opbeat_register_handlers, \ 33 logger as opbeat_logger # noqa: E402# 34 35 from opbeat.contrib.celery import \ 36 register_signal as opbeat_register_signal # noqa: E402 37 38 39 try: 40 opbeat_register_signal(app) 41 except Exception as e: 42 opbeat_logger.exception('Failed installing celery hook: %s' % e) 43 44 if 'opbeat.contrib.django' in settings.INSTALLED_APPS: 45 opbeat_register_handlers() 46 47 app.autodiscover_tasks() 48 49 50 @app.task(bind=True) 51 def task(self): 52 print('Request: {0!r}'.format(self.request)) 53 [end of backend/uclapi/uclapi/celery.py] [start of backend/uclapi/uclapi/settings.py] 1 """ 2 Django settings for uclapi project. 3 4 Generated by 'django-admin startproject' using Django 1.10.4. 5 6 For more information on this file, see 7 https://docs.djangoproject.com/en/1.10/topics/settings/ 8 9 For the full list of settings and their values, see 10 https://docs.djangoproject.com/en/1.10/ref/settings/ 11 """ 12 13 import os 14 import requests 15 from distutils.util import strtobool 16 17 # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 18 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 19 20 21 # Quick-start development settings - unsuitable for production 22 # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ 23 24 # SECURITY WARNING: keep the secret key used in production secret! 25 SECRET_KEY = os.environ.get("SECRET_KEY") 26 27 # SECURITY WARNING: don't run with debug turned on in production! 28 # This value should be set by the UCLAPI_PRODUCTION environment 29 # variable anyway. If in production, debug should be false. 
30 DEBUG = not strtobool(os.environ.get("UCLAPI_PRODUCTION")) 31 32 ALLOWED_HOSTS = ["localhost"] 33 34 # If a domain is specified then make this an allowed host 35 if os.environ.get("UCLAPI_DOMAIN"): 36 ALLOWED_HOSTS.append(os.environ.get("UCLAPI_DOMAIN")) 37 38 # If we are running under the AWS Elastic Load Balancer then enable internal 39 # requests so that the ELB and Health Checks work 40 if strtobool(os.environ.get("UCLAPI_RUNNING_ON_AWS_ELB")): 41 EC2_PRIVATE_IP = None 42 try: 43 EC2_PRIVATE_IP = requests.get( 44 "http://169.254.169.254/latest/meta-data/local-ipv4", 45 timeout=0.01 46 ).text 47 except requests.exceptions.RequestException: 48 pass 49 50 if EC2_PRIVATE_IP: 51 ALLOWED_HOSTS.append(EC2_PRIVATE_IP) 52 53 # Application definition 54 55 INSTALLED_APPS = [ 56 'django.contrib.admin', 57 'django.contrib.auth', 58 'django.contrib.contenttypes', 59 'django.contrib.sessions', 60 'django.contrib.messages', 61 'django.contrib.staticfiles', 62 'rest_framework', 63 'dashboard', 64 'marketplace', 65 'roombookings', 66 'oauth', 67 'timetable', 68 'common', 69 'opbeat.contrib.django', 70 'raven.contrib.django.raven_compat', 71 'corsheaders', 72 'workspaces' 73 ] 74 75 MIDDLEWARE = [ 76 'opbeat.contrib.django.middleware.OpbeatAPMMiddleware', 77 'django.middleware.security.SecurityMiddleware', 78 'django.contrib.sessions.middleware.SessionMiddleware', 79 'corsheaders.middleware.CorsMiddleware', 80 'django.middleware.common.CommonMiddleware', 81 'django.middleware.csrf.CsrfViewMiddleware', 82 'django.contrib.auth.middleware.AuthenticationMiddleware', 83 'django.contrib.messages.middleware.MessageMiddleware', 84 'django.middleware.clickjacking.XFrameOptionsMiddleware', 85 ] 86 87 if DEBUG: 88 MIDDLEWARE.append( 89 'dashboard.middleware.fake_shibboleth_middleware' 90 '.FakeShibbolethMiddleWare' 91 ) 92 93 ROOT_URLCONF = 'uclapi.urls' 94 95 TEMPLATES = [ 96 { 97 'BACKEND': 'django.template.backends.django.DjangoTemplates', 98 'DIRS': [], 99 'APP_DIRS': True, 100 'OPTIONS': { 101 'context_processors': [ 102 'django.template.context_processors.debug', 103 'django.template.context_processors.request', 104 'django.contrib.auth.context_processors.auth', 105 'django.contrib.messages.context_processors.messages', 106 ], 107 }, 108 }, 109 ] 110 111 WSGI_APPLICATION = 'uclapi.wsgi.application' 112 113 114 # Database 115 # https://docs.djangoproject.com/en/1.10/ref/settings/#databases 116 117 DATABASES = { 118 'default': { 119 'ENGINE': 'django.db.backends.postgresql', 120 'NAME': os.environ.get("DB_UCLAPI_NAME"), 121 'USER': os.environ.get("DB_UCLAPI_USERNAME"), 122 'PASSWORD': os.environ.get("DB_UCLAPI_PASSWORD"), 123 'HOST': os.environ.get("DB_UCLAPI_HOST"), 124 'PORT': os.environ.get("DB_UCLAPI_PORT") 125 }, 126 'roombookings': { 127 'ENGINE': 'django.db.backends.oracle', 128 'NAME': os.environ.get("DB_ROOMS_NAME"), 129 'USER': os.environ.get("DB_ROOMS_USERNAME"), 130 'PASSWORD': os.environ.get("DB_ROOMS_PASSWORD"), 131 'HOST': '', 132 'PORT': '' 133 }, 134 'gencache': { 135 'ENGINE': 'django.db.backends.postgresql', 136 'NAME': os.environ.get("DB_CACHE_NAME"), 137 'USER': os.environ.get("DB_CACHE_USERNAME"), 138 'PASSWORD': os.environ.get("DB_CACHE_PASSWORD"), 139 'HOST': os.environ.get("DB_CACHE_HOST"), 140 'PORT': os.environ.get("DB_CACHE_PORT") 141 } 142 } 143 144 DATABASE_ROUTERS = ['uclapi.dbrouters.ModelRouter'] 145 146 # analytics 147 OPBEAT = { 148 'ORGANIZATION_ID': os.environ.get("OPBEAT_ORG_ID"), 149 'APP_ID': os.environ.get("OPBEAT_APP_ID"), 150 'SECRET_TOKEN': 
os.environ.get("OPBEAT_SECRET_TOKEN") 151 } 152 153 RAVEN_CONFIG = { 154 'dsn': os.environ.get("SENTRY_DSN"), 155 } 156 157 158 # Password validation 159 # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators 160 161 AUTH_PASSWORD_VALIDATORS = [ 162 { 163 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', # noqa 164 }, 165 { 166 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', # noqa 167 }, 168 { 169 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', # noqa 170 }, 171 { 172 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', # noqa 173 }, 174 ] 175 176 177 # Internationalization 178 # https://docs.djangoproject.com/en/1.10/topics/i18n/ 179 180 LANGUAGE_CODE = 'en-us' 181 182 TIME_ZONE = 'UTC' 183 184 USE_I18N = True 185 186 USE_L10N = True 187 188 USE_TZ = False 189 190 # Static files (CSS, JavaScript, Images) 191 # https://docs.djangoproject.com/en/1.10/howto/static-files/ 192 193 STATIC_URL = '/static/' 194 STATIC_ROOT = os.path.join(BASE_DIR, 'static') 195 196 # Cross Origin settings 197 CORS_ORIGIN_ALLOW_ALL = True 198 CORS_URLS_REGEX = r'^/roombookings/.*$' 199 200 # Fair use policy 201 fair_use_policy_path = os.path.join( 202 BASE_DIR, 203 'uclapi/UCLAPIAcceptableUsePolicy.txt' 204 ) 205 with open(fair_use_policy_path, 'r', encoding='utf-8') as fp: 206 FAIR_USE_POLICY = list(fp) 207 208 REDIS_UCLAPI_HOST = os.environ["REDIS_UCLAPI_HOST"] 209 210 # Celery Settings 211 CELERY_BROKER_URL = 'redis://' + REDIS_UCLAPI_HOST 212 CELERY_ACCEPT_CONTENT = ['json'] 213 CELERY_TASK_SERIALIZER = 'json' 214 CELERY_RESULT_SERIALIZER = 'json' 215 216 217 ROOMBOOKINGS_SETID = 'LIVE-17-18' 218 219 # S3 file storage settings 220 # There are three scenarios to consider: 221 # 1) Local development 222 # In local dev, AWS_S3_STATICS = False 223 # AWS_S3_STATICS_CREDENTIALS_ENABLED = False 224 # These allow you to use local statics using /static/ in the 225 # same way as you would normally. 226 # 2) Production 227 # In prod, AWS_S3_STATICS = True 228 # AWS_S3_STATICS_CREDENTIALS_ENABLED = False 229 # This means that S3 statics will be used, but no creds are 230 # needed on the boxes because web servers should never do 231 # uploads to the remote S3 bucket. 232 # 3) Deployment 233 # In deployment, AWS_S3_STATICS = True 234 # AWS_S3_STATICS_CREDENTIALS_ENABLED = True 235 # This will be done either from CI/CD or from the computer 236 # of a person who has permission to upload new statics to 237 # S3. 238 239 if strtobool(os.environ.get("AWS_S3_STATICS", "False")): 240 DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage' 241 STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage' 242 AWS_STORAGE_BUCKET_NAME = os.environ["AWS_S3_BUCKET_NAME"] 243 AWS_LOCATION = os.environ["AWS_S3_BUCKET_PATH"] 244 AWS_S3_REGION_NAME = os.environ["AWS_S3_REGION"] 245 246 # This is a hack to not require AWS Access Credentials 247 # when the system is running in the Cloud. This avoids us from 248 # needing to store AWS credentials. 
249 # https://github.com/jschneier/django-storages/issues/254#issuecomment-329813295 # noqa 250 AWS_S3_CUSTOM_DOMAIN = "{}.s3.amazonaws.com".format( 251 AWS_STORAGE_BUCKET_NAME 252 ) 253 254 # If credentials are enabled, collectstatic can do uploads 255 if strtobool(os.environ["AWS_S3_STATICS_CREDENTIALS_ENABLED"]): 256 AWS_ACCESS_KEY_ID = os.environ["AWS_ACCESS_KEY_ID"] 257 AWS_SECRET_ACCESS_KEY = os.environ["AWS_ACCESS_SECRET"] 258 AWS_S3_OBJECT_PARAMETERS = { 259 'CacheControl': 'max-age=86400', 260 } 261 AWS_S3_ENCRYPTION = False 262 else: 263 AWS_QUERYSTRING_AUTH = False 264 [end of backend/uclapi/uclapi/settings.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/backend/uclapi/uclapi/celery.py b/backend/uclapi/uclapi/celery.py --- a/backend/uclapi/uclapi/celery.py +++ b/backend/uclapi/uclapi/celery.py @@ -27,23 +27,6 @@ app.config_from_object('django.conf.settings', namespace='CELERY') - -from opbeat.contrib.django.models import \ - register_handlers as opbeat_register_handlers, \ - logger as opbeat_logger # noqa: E402# - -from opbeat.contrib.celery import \ - register_signal as opbeat_register_signal # noqa: E402 - - -try: - opbeat_register_signal(app) -except Exception as e: - opbeat_logger.exception('Failed installing celery hook: %s' % e) - -if 'opbeat.contrib.django' in settings.INSTALLED_APPS: - opbeat_register_handlers() - app.autodiscover_tasks() diff --git a/backend/uclapi/uclapi/settings.py b/backend/uclapi/uclapi/settings.py --- a/backend/uclapi/uclapi/settings.py +++ b/backend/uclapi/uclapi/settings.py @@ -66,14 +66,12 @@ 'oauth', 'timetable', 'common', - 'opbeat.contrib.django', 'raven.contrib.django.raven_compat', 'corsheaders', 'workspaces' ] MIDDLEWARE = [ - 'opbeat.contrib.django.middleware.OpbeatAPMMiddleware', 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'corsheaders.middleware.CorsMiddleware', @@ -143,13 +141,6 @@ DATABASE_ROUTERS = ['uclapi.dbrouters.ModelRouter'] -# analytics -OPBEAT = { - 'ORGANIZATION_ID': os.environ.get("OPBEAT_ORG_ID"), - 'APP_ID': os.environ.get("OPBEAT_APP_ID"), - 'SECRET_TOKEN': os.environ.get("OPBEAT_SECRET_TOKEN") -} - RAVEN_CONFIG = { 'dsn': os.environ.get("SENTRY_DSN"), }
{"golden_diff": "diff --git a/backend/uclapi/uclapi/celery.py b/backend/uclapi/uclapi/celery.py\n--- a/backend/uclapi/uclapi/celery.py\n+++ b/backend/uclapi/uclapi/celery.py\n@@ -27,23 +27,6 @@\n \n app.config_from_object('django.conf.settings', namespace='CELERY')\n \n-\n-from opbeat.contrib.django.models import \\\n- register_handlers as opbeat_register_handlers, \\\n- logger as opbeat_logger # noqa: E402#\n-\n-from opbeat.contrib.celery import \\\n- register_signal as opbeat_register_signal # noqa: E402\n-\n-\n-try:\n- opbeat_register_signal(app)\n-except Exception as e:\n- opbeat_logger.exception('Failed installing celery hook: %s' % e)\n-\n-if 'opbeat.contrib.django' in settings.INSTALLED_APPS:\n- opbeat_register_handlers()\n-\n app.autodiscover_tasks()\n \n \ndiff --git a/backend/uclapi/uclapi/settings.py b/backend/uclapi/uclapi/settings.py\n--- a/backend/uclapi/uclapi/settings.py\n+++ b/backend/uclapi/uclapi/settings.py\n@@ -66,14 +66,12 @@\n 'oauth',\n 'timetable',\n 'common',\n- 'opbeat.contrib.django',\n 'raven.contrib.django.raven_compat',\n 'corsheaders',\n 'workspaces'\n ]\n \n MIDDLEWARE = [\n- 'opbeat.contrib.django.middleware.OpbeatAPMMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n@@ -143,13 +141,6 @@\n \n DATABASE_ROUTERS = ['uclapi.dbrouters.ModelRouter']\n \n-# analytics\n-OPBEAT = {\n- 'ORGANIZATION_ID': os.environ.get(\"OPBEAT_ORG_ID\"),\n- 'APP_ID': os.environ.get(\"OPBEAT_APP_ID\"),\n- 'SECRET_TOKEN': os.environ.get(\"OPBEAT_SECRET_TOKEN\")\n-}\n-\n RAVEN_CONFIG = {\n 'dsn': os.environ.get(\"SENTRY_DSN\"),\n }\n", "issue": "Remove opbeat\nOpbeat is no longer a thing, so we should remove it and replace it with something else? 
Maybe double down on Sentry and integrate that more tightly\r\n\r\nhttps://github.com/uclapi/uclapi/blob/81b90305f9316b020664b32c2436e27ab957e8a7/backend/uclapi/requirements.txt#L33\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport celery\nimport os\nimport raven\n\nfrom django.conf import settings\nfrom raven.contrib.celery import register_signal as raven_register_signal, \\\n register_logger_signal as raven_register_logger_signal\n\nfrom common.helpers import read_dotenv\n\nread_dotenv(os.path.join(os.path.dirname(os.path.dirname(__file__)), '.env'))\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'uclapi.settings')\n\n\nclass Celery(celery.Celery):\n def on_configure(self):\n if os.environ.get(\"SENTRY_DSN\") is not None:\n client = raven.Client(os.environ.get(\"SENTRY_DSN\"))\n raven_register_logger_signal(client)\n raven_register_signal(client)\n\n\napp = Celery('uclapi')\n\napp.config_from_object('django.conf.settings', namespace='CELERY')\n\n\nfrom opbeat.contrib.django.models import \\\n register_handlers as opbeat_register_handlers, \\\n logger as opbeat_logger # noqa: E402#\n\nfrom opbeat.contrib.celery import \\\n register_signal as opbeat_register_signal # noqa: E402\n\n\ntry:\n opbeat_register_signal(app)\nexcept Exception as e:\n opbeat_logger.exception('Failed installing celery hook: %s' % e)\n\nif 'opbeat.contrib.django' in settings.INSTALLED_APPS:\n opbeat_register_handlers()\n\napp.autodiscover_tasks()\n\n\n@app.task(bind=True)\ndef task(self):\n print('Request: {0!r}'.format(self.request))\n", "path": "backend/uclapi/uclapi/celery.py"}, {"content": "\"\"\"\nDjango settings for uclapi project.\n\nGenerated by 'django-admin startproject' using Django 1.10.4.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.10/ref/settings/\n\"\"\"\n\nimport os\nimport requests\nfrom distutils.util import strtobool\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\n# This value should be set by the UCLAPI_PRODUCTION environment\n# variable anyway. 
If in production, debug should be false.\nDEBUG = not strtobool(os.environ.get(\"UCLAPI_PRODUCTION\"))\n\nALLOWED_HOSTS = [\"localhost\"]\n\n# If a domain is specified then make this an allowed host\nif os.environ.get(\"UCLAPI_DOMAIN\"):\n ALLOWED_HOSTS.append(os.environ.get(\"UCLAPI_DOMAIN\"))\n\n# If we are running under the AWS Elastic Load Balancer then enable internal\n# requests so that the ELB and Health Checks work\nif strtobool(os.environ.get(\"UCLAPI_RUNNING_ON_AWS_ELB\")):\n EC2_PRIVATE_IP = None\n try:\n EC2_PRIVATE_IP = requests.get(\n \"http://169.254.169.254/latest/meta-data/local-ipv4\",\n timeout=0.01\n ).text\n except requests.exceptions.RequestException:\n pass\n\n if EC2_PRIVATE_IP:\n ALLOWED_HOSTS.append(EC2_PRIVATE_IP)\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'dashboard',\n 'marketplace',\n 'roombookings',\n 'oauth',\n 'timetable',\n 'common',\n 'opbeat.contrib.django',\n 'raven.contrib.django.raven_compat',\n 'corsheaders',\n 'workspaces'\n]\n\nMIDDLEWARE = [\n 'opbeat.contrib.django.middleware.OpbeatAPMMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nif DEBUG:\n MIDDLEWARE.append(\n 'dashboard.middleware.fake_shibboleth_middleware'\n '.FakeShibbolethMiddleWare'\n )\n\nROOT_URLCONF = 'uclapi.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'uclapi.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.10/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ.get(\"DB_UCLAPI_NAME\"),\n 'USER': os.environ.get(\"DB_UCLAPI_USERNAME\"),\n 'PASSWORD': os.environ.get(\"DB_UCLAPI_PASSWORD\"),\n 'HOST': os.environ.get(\"DB_UCLAPI_HOST\"),\n 'PORT': os.environ.get(\"DB_UCLAPI_PORT\")\n },\n 'roombookings': {\n 'ENGINE': 'django.db.backends.oracle',\n 'NAME': os.environ.get(\"DB_ROOMS_NAME\"),\n 'USER': os.environ.get(\"DB_ROOMS_USERNAME\"),\n 'PASSWORD': os.environ.get(\"DB_ROOMS_PASSWORD\"),\n 'HOST': '',\n 'PORT': ''\n },\n 'gencache': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ.get(\"DB_CACHE_NAME\"),\n 'USER': os.environ.get(\"DB_CACHE_USERNAME\"),\n 'PASSWORD': os.environ.get(\"DB_CACHE_PASSWORD\"),\n 'HOST': os.environ.get(\"DB_CACHE_HOST\"),\n 'PORT': os.environ.get(\"DB_CACHE_PORT\")\n }\n}\n\nDATABASE_ROUTERS = ['uclapi.dbrouters.ModelRouter']\n\n# analytics\nOPBEAT = {\n 'ORGANIZATION_ID': os.environ.get(\"OPBEAT_ORG_ID\"),\n 'APP_ID': os.environ.get(\"OPBEAT_APP_ID\"),\n 'SECRET_TOKEN': os.environ.get(\"OPBEAT_SECRET_TOKEN\")\n}\n\nRAVEN_CONFIG = {\n 'dsn': os.environ.get(\"SENTRY_DSN\"),\n}\n\n\n# Password validation\n# 
https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', # noqa\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', # noqa\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', # noqa\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', # noqa\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.10/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = False\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.10/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\n\n# Cross Origin settings\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r'^/roombookings/.*$'\n\n# Fair use policy\nfair_use_policy_path = os.path.join(\n BASE_DIR,\n 'uclapi/UCLAPIAcceptableUsePolicy.txt'\n)\nwith open(fair_use_policy_path, 'r', encoding='utf-8') as fp:\n FAIR_USE_POLICY = list(fp)\n\nREDIS_UCLAPI_HOST = os.environ[\"REDIS_UCLAPI_HOST\"]\n\n# Celery Settings\nCELERY_BROKER_URL = 'redis://' + REDIS_UCLAPI_HOST\nCELERY_ACCEPT_CONTENT = ['json']\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\n\n\nROOMBOOKINGS_SETID = 'LIVE-17-18'\n\n# S3 file storage settings\n# There are three scenarios to consider:\n# 1) Local development\n# In local dev, AWS_S3_STATICS = False\n# AWS_S3_STATICS_CREDENTIALS_ENABLED = False\n# These allow you to use local statics using /static/ in the\n# same way as you would normally.\n# 2) Production\n# In prod, AWS_S3_STATICS = True\n# AWS_S3_STATICS_CREDENTIALS_ENABLED = False\n# This means that S3 statics will be used, but no creds are\n# needed on the boxes because web servers should never do\n# uploads to the remote S3 bucket.\n# 3) Deployment\n# In deployment, AWS_S3_STATICS = True\n# AWS_S3_STATICS_CREDENTIALS_ENABLED = True\n# This will be done either from CI/CD or from the computer\n# of a person who has permission to upload new statics to\n# S3.\n\nif strtobool(os.environ.get(\"AWS_S3_STATICS\", \"False\")):\n DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\n STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\n AWS_STORAGE_BUCKET_NAME = os.environ[\"AWS_S3_BUCKET_NAME\"]\n AWS_LOCATION = os.environ[\"AWS_S3_BUCKET_PATH\"]\n AWS_S3_REGION_NAME = os.environ[\"AWS_S3_REGION\"]\n\n # This is a hack to not require AWS Access Credentials\n # when the system is running in the Cloud. This avoids us from\n # needing to store AWS credentials.\n # https://github.com/jschneier/django-storages/issues/254#issuecomment-329813295 # noqa\n AWS_S3_CUSTOM_DOMAIN = \"{}.s3.amazonaws.com\".format(\n AWS_STORAGE_BUCKET_NAME\n )\n\n # If credentials are enabled, collectstatic can do uploads\n if strtobool(os.environ[\"AWS_S3_STATICS_CREDENTIALS_ENABLED\"]):\n AWS_ACCESS_KEY_ID = os.environ[\"AWS_ACCESS_KEY_ID\"]\n AWS_SECRET_ACCESS_KEY = os.environ[\"AWS_ACCESS_SECRET\"]\n AWS_S3_OBJECT_PARAMETERS = {\n 'CacheControl': 'max-age=86400',\n }\n AWS_S3_ENCRYPTION = False\n else:\n AWS_QUERYSTRING_AUTH = False\n", "path": "backend/uclapi/uclapi/settings.py"}]}
3,882
487
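Editor's note on the row above: the uclapi diff only strips the dead Opbeat integration and keeps the existing Raven/Sentry configuration. If the project later wanted to "double down on Sentry" as the issue suggests, one option — an assumption on my part, not part of this row's patch — is the unified `sentry-sdk`, which replaces both the Raven client and the per-framework hooks with a single init call.

```python
# Hypothetical follow-up, not included in this row's golden diff.
import os

import sentry_sdk
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.django import DjangoIntegration

sentry_sdk.init(
    dsn=os.environ.get("SENTRY_DSN"),
    # Django requests and Celery tasks are both captured automatically,
    # so no middleware entry or register_signal call is needed.
    integrations=[DjangoIntegration(), CeleryIntegration()],
)
```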
gh_patches_debug_38644
rasdani/github-patches
git_diff
python-pillow__Pillow-730
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add 16bit encode/decode to JPEG2K (WIP) Add the ability to write 16bit JPEG2k Images (for DICOM medical images). I'd trying to add a decoder too but I couldn't quite figure it out. Here's my dumb test case: ``` python import numpy as np from PIL import Image arr = np.zeros((64, 96), dtype=np.uint16) arr[16:64, 0:32] = 300 img = Image.fromarray(arr, 'I;16') img.save('grr.j2k') # Works img2 = Image.open('grr.j2k') arr2 = np.array(img2) np.testing.assert_almost_equal(arr, arr2) # Fails ``` Thoughts? </issue> <code> [start of PIL/Jpeg2KImagePlugin.py] 1 # 2 # The Python Imaging Library 3 # $Id$ 4 # 5 # JPEG2000 file handling 6 # 7 # History: 8 # 2014-03-12 ajh Created 9 # 10 # Copyright (c) 2014 Coriolis Systems Limited 11 # Copyright (c) 2014 Alastair Houghton 12 # 13 # See the README file for information on usage and redistribution. 14 # 15 16 __version__ = "0.1" 17 18 from PIL import Image, ImageFile 19 import struct 20 import os 21 import io 22 23 24 def _parse_codestream(fp): 25 """Parse the JPEG 2000 codestream to extract the size and component 26 count from the SIZ marker segment, returning a PIL (size, mode) tuple.""" 27 28 hdr = fp.read(2) 29 lsiz = struct.unpack('>H', hdr)[0] 30 siz = hdr + fp.read(lsiz - 2) 31 lsiz, rsiz, xsiz, ysiz, xosiz, yosiz, xtsiz, ytsiz, \ 32 xtosiz, ytosiz, csiz \ 33 = struct.unpack('>HHIIIIIIIIH', siz[:38]) 34 ssiz = [None]*csiz 35 xrsiz = [None]*csiz 36 yrsiz = [None]*csiz 37 for i in range(csiz): 38 ssiz[i], xrsiz[i], yrsiz[i] \ 39 = struct.unpack('>BBB', siz[36 + 3 * i:39 + 3 * i]) 40 41 size = (xsiz - xosiz, ysiz - yosiz) 42 if csiz == 1: 43 mode = 'L' 44 elif csiz == 2: 45 mode = 'LA' 46 elif csiz == 3: 47 mode = 'RGB' 48 elif csiz == 4: 49 mode = 'RGBA' 50 else: 51 mode = None 52 53 return (size, mode) 54 55 56 def _parse_jp2_header(fp): 57 """Parse the JP2 header box to extract size, component count and 58 color space information, returning a PIL (size, mode) tuple.""" 59 60 # Find the JP2 header box 61 header = None 62 while True: 63 lbox, tbox = struct.unpack('>I4s', fp.read(8)) 64 if lbox == 1: 65 lbox = struct.unpack('>Q', fp.read(8))[0] 66 hlen = 16 67 else: 68 hlen = 8 69 70 if tbox == b'jp2h': 71 header = fp.read(lbox - hlen) 72 break 73 else: 74 fp.seek(lbox - hlen, os.SEEK_CUR) 75 76 if header is None: 77 raise SyntaxError('could not find JP2 header') 78 79 size = None 80 mode = None 81 82 hio = io.BytesIO(header) 83 while True: 84 lbox, tbox = struct.unpack('>I4s', hio.read(8)) 85 if lbox == 1: 86 lbox = struct.unpack('>Q', hio.read(8))[0] 87 hlen = 16 88 else: 89 hlen = 8 90 91 content = hio.read(lbox - hlen) 92 93 if tbox == b'ihdr': 94 height, width, nc, bpc, c, unkc, ipr \ 95 = struct.unpack('>IIHBBBB', content) 96 size = (width, height) 97 if unkc: 98 if nc == 1: 99 mode = 'L' 100 elif nc == 2: 101 mode = 'LA' 102 elif nc == 3: 103 mode = 'RGB' 104 elif nc == 4: 105 mode = 'RGBA' 106 break 107 elif tbox == b'colr': 108 meth, prec, approx = struct.unpack('>BBB', content[:3]) 109 if meth == 1: 110 cs = struct.unpack('>I', content[3:7])[0] 111 if cs == 16: # sRGB 112 if nc == 3: 113 mode = 'RGB' 114 elif nc == 4: 115 mode = 'RGBA' 116 break 117 elif cs == 17: # grayscale 118 if nc == 1: 119 mode = 'L' 120 elif nc == 2: 121 mode = 'LA' 122 break 123 elif cs == 18: # sYCC 124 if nc == 3: 125 mode = 'RGB' 126 elif nc == 4: 127 mode = 'RGBA' 128 break 129 130 return (size, mode) 131 132 133 ## 134 # Image plugin for JPEG2000 images. 
135 136 class Jpeg2KImageFile(ImageFile.ImageFile): 137 format = "JPEG2000" 138 format_description = "JPEG 2000 (ISO 15444)" 139 140 def _open(self): 141 sig = self.fp.read(4) 142 if sig == b'\xff\x4f\xff\x51': 143 self.codec = "j2k" 144 self.size, self.mode = _parse_codestream(self.fp) 145 else: 146 sig = sig + self.fp.read(8) 147 148 if sig == b'\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a': 149 self.codec = "jp2" 150 self.size, self.mode = _parse_jp2_header(self.fp) 151 else: 152 raise SyntaxError('not a JPEG 2000 file') 153 154 if self.size is None or self.mode is None: 155 raise SyntaxError('unable to determine size/mode') 156 157 self.reduce = 0 158 self.layers = 0 159 160 fd = -1 161 length = -1 162 163 if hasattr(self.fp, "fileno"): 164 try: 165 fd = self.fp.fileno() 166 length = os.fstat(fd).st_size 167 except: 168 fd = -1 169 elif hasattr(self.fp, "seek"): 170 try: 171 pos = f.tell() 172 f.seek(0, 2) 173 length = f.tell() 174 f.seek(pos, 0) 175 except: 176 length = -1 177 178 self.tile = [('jpeg2k', (0, 0) + self.size, 0, 179 (self.codec, self.reduce, self.layers, fd, length))] 180 181 def load(self): 182 if self.reduce: 183 power = 1 << self.reduce 184 adjust = power >> 1 185 self.size = (int((self.size[0] + adjust) / power), 186 int((self.size[1] + adjust) / power)) 187 188 if self.tile: 189 # Update the reduce and layers settings 190 t = self.tile[0] 191 t3 = (t[3][0], self.reduce, self.layers, t[3][3], t[3][4]) 192 self.tile = [(t[0], (0, 0) + self.size, t[2], t3)] 193 194 ImageFile.ImageFile.load(self) 195 196 197 def _accept(prefix): 198 return (prefix[:4] == b'\xff\x4f\xff\x51' 199 or prefix[:12] == b'\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a') 200 201 202 # ------------------------------------------------------------ 203 # Save support 204 205 def _save(im, fp, filename): 206 if filename.endswith('.j2k'): 207 kind = 'j2k' 208 else: 209 kind = 'jp2' 210 211 # Get the keyword arguments 212 info = im.encoderinfo 213 214 offset = info.get('offset', None) 215 tile_offset = info.get('tile_offset', None) 216 tile_size = info.get('tile_size', None) 217 quality_mode = info.get('quality_mode', 'rates') 218 quality_layers = info.get('quality_layers', None) 219 num_resolutions = info.get('num_resolutions', 0) 220 cblk_size = info.get('codeblock_size', None) 221 precinct_size = info.get('precinct_size', None) 222 irreversible = info.get('irreversible', False) 223 progression = info.get('progression', 'LRCP') 224 cinema_mode = info.get('cinema_mode', 'no') 225 fd = -1 226 227 if hasattr(fp, "fileno"): 228 try: 229 fd = fp.fileno() 230 except: 231 fd = -1 232 233 im.encoderconfig = ( 234 offset, 235 tile_offset, 236 tile_size, 237 quality_mode, 238 quality_layers, 239 num_resolutions, 240 cblk_size, 241 precinct_size, 242 irreversible, 243 progression, 244 cinema_mode, 245 fd 246 ) 247 248 ImageFile._save(im, fp, [('jpeg2k', (0, 0)+im.size, 0, kind)]) 249 250 # ------------------------------------------------------------ 251 # Registry stuff 252 253 Image.register_open('JPEG2000', Jpeg2KImageFile, _accept) 254 Image.register_save('JPEG2000', _save) 255 256 Image.register_extension('JPEG2000', '.jp2') 257 Image.register_extension('JPEG2000', '.j2k') 258 Image.register_extension('JPEG2000', '.jpc') 259 Image.register_extension('JPEG2000', '.jpf') 260 Image.register_extension('JPEG2000', '.jpx') 261 Image.register_extension('JPEG2000', '.j2c') 262 263 Image.register_mime('JPEG2000', 'image/jp2') 264 Image.register_mime('JPEG2000', 'image/jpx') 265 [end of PIL/Jpeg2KImagePlugin.py] </code> I need you to 
solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/PIL/Jpeg2KImagePlugin.py b/PIL/Jpeg2KImagePlugin.py --- a/PIL/Jpeg2KImagePlugin.py +++ b/PIL/Jpeg2KImagePlugin.py @@ -40,7 +40,10 @@ size = (xsiz - xosiz, ysiz - yosiz) if csiz == 1: - mode = 'L' + if (yrsiz[0] & 0x7f) > 8: + mode = 'I;16' + else: + mode = 'L' elif csiz == 2: mode = 'LA' elif csiz == 3: @@ -78,6 +81,7 @@ size = None mode = None + bpc = None hio = io.BytesIO(header) while True: @@ -95,7 +99,9 @@ = struct.unpack('>IIHBBBB', content) size = (width, height) if unkc: - if nc == 1: + if nc == 1 and (bpc & 0x7f) > 8: + mode = 'I;16' + elif nc == 1: mode = 'L' elif nc == 2: mode = 'LA' @@ -109,13 +115,19 @@ if meth == 1: cs = struct.unpack('>I', content[3:7])[0] if cs == 16: # sRGB - if nc == 3: + if nc == 1 and (bpc & 0x7f) > 8: + mode = 'I;16' + elif nc == 1: + mode = 'L' + elif nc == 3: mode = 'RGB' elif nc == 4: mode = 'RGBA' break elif cs == 17: # grayscale - if nc == 1: + if nc == 1 and (bpc & 0x7f) > 8: + mode = 'I;16' + elif nc == 1: mode = 'L' elif nc == 2: mode = 'LA' @@ -129,10 +141,10 @@ return (size, mode) - ## # Image plugin for JPEG2000 images. + class Jpeg2KImageFile(ImageFile.ImageFile): format = "JPEG2000" format_description = "JPEG 2000 (ISO 15444)" @@ -174,7 +186,7 @@ f.seek(pos, 0) except: length = -1 - + self.tile = [('jpeg2k', (0, 0) + self.size, 0, (self.codec, self.reduce, self.layers, fd, length))]
{"golden_diff": "diff --git a/PIL/Jpeg2KImagePlugin.py b/PIL/Jpeg2KImagePlugin.py\n--- a/PIL/Jpeg2KImagePlugin.py\n+++ b/PIL/Jpeg2KImagePlugin.py\n@@ -40,7 +40,10 @@\n \n size = (xsiz - xosiz, ysiz - yosiz)\n if csiz == 1:\n- mode = 'L'\n+ if (yrsiz[0] & 0x7f) > 8:\n+ mode = 'I;16'\n+ else:\n+ mode = 'L'\n elif csiz == 2:\n mode = 'LA'\n elif csiz == 3:\n@@ -78,6 +81,7 @@\n \n size = None\n mode = None\n+ bpc = None\n \n hio = io.BytesIO(header)\n while True:\n@@ -95,7 +99,9 @@\n = struct.unpack('>IIHBBBB', content)\n size = (width, height)\n if unkc:\n- if nc == 1:\n+ if nc == 1 and (bpc & 0x7f) > 8:\n+ mode = 'I;16'\n+ elif nc == 1:\n mode = 'L'\n elif nc == 2:\n mode = 'LA'\n@@ -109,13 +115,19 @@\n if meth == 1:\n cs = struct.unpack('>I', content[3:7])[0]\n if cs == 16: # sRGB\n- if nc == 3:\n+ if nc == 1 and (bpc & 0x7f) > 8:\n+ mode = 'I;16'\n+ elif nc == 1:\n+ mode = 'L'\n+ elif nc == 3:\n mode = 'RGB'\n elif nc == 4:\n mode = 'RGBA'\n break\n elif cs == 17: # grayscale\n- if nc == 1:\n+ if nc == 1 and (bpc & 0x7f) > 8:\n+ mode = 'I;16'\n+ elif nc == 1:\n mode = 'L'\n elif nc == 2:\n mode = 'LA'\n@@ -129,10 +141,10 @@\n \n return (size, mode)\n \n-\n ##\n # Image plugin for JPEG2000 images.\n \n+\n class Jpeg2KImageFile(ImageFile.ImageFile):\n format = \"JPEG2000\"\n format_description = \"JPEG 2000 (ISO 15444)\"\n@@ -174,7 +186,7 @@\n f.seek(pos, 0)\n except:\n length = -1\n- \n+\n self.tile = [('jpeg2k', (0, 0) + self.size, 0,\n (self.codec, self.reduce, self.layers, fd, length))]\n", "issue": "Add 16bit encode/decode to JPEG2K (WIP)\nAdd the ability to write 16bit JPEG2k Images (for DICOM medical images).\n\nI'd trying to add a decoder too but I couldn't quite figure it out. Here's my dumb test case:\n\n``` python\nimport numpy as np\nfrom PIL import Image\n\narr = np.zeros((64, 96), dtype=np.uint16)\narr[16:64, 0:32] = 300\n\nimg = Image.fromarray(arr, 'I;16')\nimg.save('grr.j2k') # Works\n\nimg2 = Image.open('grr.j2k')\narr2 = np.array(img2)\n\nnp.testing.assert_almost_equal(arr, arr2) # Fails\n```\n\nThoughts?\n\n", "before_files": [{"content": "#\n# The Python Imaging Library\n# $Id$\n#\n# JPEG2000 file handling\n#\n# History:\n# 2014-03-12 ajh Created\n#\n# Copyright (c) 2014 Coriolis Systems Limited\n# Copyright (c) 2014 Alastair Houghton\n#\n# See the README file for information on usage and redistribution.\n#\n\n__version__ = \"0.1\"\n\nfrom PIL import Image, ImageFile\nimport struct\nimport os\nimport io\n\n\ndef _parse_codestream(fp):\n \"\"\"Parse the JPEG 2000 codestream to extract the size and component\n count from the SIZ marker segment, returning a PIL (size, mode) tuple.\"\"\"\n\n hdr = fp.read(2)\n lsiz = struct.unpack('>H', hdr)[0]\n siz = hdr + fp.read(lsiz - 2)\n lsiz, rsiz, xsiz, ysiz, xosiz, yosiz, xtsiz, ytsiz, \\\n xtosiz, ytosiz, csiz \\\n = struct.unpack('>HHIIIIIIIIH', siz[:38])\n ssiz = [None]*csiz\n xrsiz = [None]*csiz\n yrsiz = [None]*csiz\n for i in range(csiz):\n ssiz[i], xrsiz[i], yrsiz[i] \\\n = struct.unpack('>BBB', siz[36 + 3 * i:39 + 3 * i])\n\n size = (xsiz - xosiz, ysiz - yosiz)\n if csiz == 1:\n mode = 'L'\n elif csiz == 2:\n mode = 'LA'\n elif csiz == 3:\n mode = 'RGB'\n elif csiz == 4:\n mode = 'RGBA'\n else:\n mode = None\n\n return (size, mode)\n\n\ndef _parse_jp2_header(fp):\n \"\"\"Parse the JP2 header box to extract size, component count and\n color space information, returning a PIL (size, mode) tuple.\"\"\"\n\n # Find the JP2 header box\n header = None\n while True:\n lbox, tbox = struct.unpack('>I4s', fp.read(8))\n if lbox == 1:\n 
lbox = struct.unpack('>Q', fp.read(8))[0]\n hlen = 16\n else:\n hlen = 8\n\n if tbox == b'jp2h':\n header = fp.read(lbox - hlen)\n break\n else:\n fp.seek(lbox - hlen, os.SEEK_CUR)\n\n if header is None:\n raise SyntaxError('could not find JP2 header')\n\n size = None\n mode = None\n\n hio = io.BytesIO(header)\n while True:\n lbox, tbox = struct.unpack('>I4s', hio.read(8))\n if lbox == 1:\n lbox = struct.unpack('>Q', hio.read(8))[0]\n hlen = 16\n else:\n hlen = 8\n\n content = hio.read(lbox - hlen)\n\n if tbox == b'ihdr':\n height, width, nc, bpc, c, unkc, ipr \\\n = struct.unpack('>IIHBBBB', content)\n size = (width, height)\n if unkc:\n if nc == 1:\n mode = 'L'\n elif nc == 2:\n mode = 'LA'\n elif nc == 3:\n mode = 'RGB'\n elif nc == 4:\n mode = 'RGBA'\n break\n elif tbox == b'colr':\n meth, prec, approx = struct.unpack('>BBB', content[:3])\n if meth == 1:\n cs = struct.unpack('>I', content[3:7])[0]\n if cs == 16: # sRGB\n if nc == 3:\n mode = 'RGB'\n elif nc == 4:\n mode = 'RGBA'\n break\n elif cs == 17: # grayscale\n if nc == 1:\n mode = 'L'\n elif nc == 2:\n mode = 'LA'\n break\n elif cs == 18: # sYCC\n if nc == 3:\n mode = 'RGB'\n elif nc == 4:\n mode = 'RGBA'\n break\n\n return (size, mode)\n\n\n##\n# Image plugin for JPEG2000 images.\n\nclass Jpeg2KImageFile(ImageFile.ImageFile):\n format = \"JPEG2000\"\n format_description = \"JPEG 2000 (ISO 15444)\"\n\n def _open(self):\n sig = self.fp.read(4)\n if sig == b'\\xff\\x4f\\xff\\x51':\n self.codec = \"j2k\"\n self.size, self.mode = _parse_codestream(self.fp)\n else:\n sig = sig + self.fp.read(8)\n\n if sig == b'\\x00\\x00\\x00\\x0cjP \\x0d\\x0a\\x87\\x0a':\n self.codec = \"jp2\"\n self.size, self.mode = _parse_jp2_header(self.fp)\n else:\n raise SyntaxError('not a JPEG 2000 file')\n\n if self.size is None or self.mode is None:\n raise SyntaxError('unable to determine size/mode')\n\n self.reduce = 0\n self.layers = 0\n\n fd = -1\n length = -1\n\n if hasattr(self.fp, \"fileno\"):\n try:\n fd = self.fp.fileno()\n length = os.fstat(fd).st_size\n except:\n fd = -1\n elif hasattr(self.fp, \"seek\"):\n try:\n pos = f.tell()\n f.seek(0, 2)\n length = f.tell()\n f.seek(pos, 0)\n except:\n length = -1\n \n self.tile = [('jpeg2k', (0, 0) + self.size, 0,\n (self.codec, self.reduce, self.layers, fd, length))]\n\n def load(self):\n if self.reduce:\n power = 1 << self.reduce\n adjust = power >> 1\n self.size = (int((self.size[0] + adjust) / power),\n int((self.size[1] + adjust) / power))\n\n if self.tile:\n # Update the reduce and layers settings\n t = self.tile[0]\n t3 = (t[3][0], self.reduce, self.layers, t[3][3], t[3][4])\n self.tile = [(t[0], (0, 0) + self.size, t[2], t3)]\n\n ImageFile.ImageFile.load(self)\n\n\ndef _accept(prefix):\n return (prefix[:4] == b'\\xff\\x4f\\xff\\x51'\n or prefix[:12] == b'\\x00\\x00\\x00\\x0cjP \\x0d\\x0a\\x87\\x0a')\n\n\n# ------------------------------------------------------------\n# Save support\n\ndef _save(im, fp, filename):\n if filename.endswith('.j2k'):\n kind = 'j2k'\n else:\n kind = 'jp2'\n\n # Get the keyword arguments\n info = im.encoderinfo\n\n offset = info.get('offset', None)\n tile_offset = info.get('tile_offset', None)\n tile_size = info.get('tile_size', None)\n quality_mode = info.get('quality_mode', 'rates')\n quality_layers = info.get('quality_layers', None)\n num_resolutions = info.get('num_resolutions', 0)\n cblk_size = info.get('codeblock_size', None)\n precinct_size = info.get('precinct_size', None)\n irreversible = info.get('irreversible', False)\n progression = info.get('progression', 
'LRCP')\n cinema_mode = info.get('cinema_mode', 'no')\n fd = -1\n\n if hasattr(fp, \"fileno\"):\n try:\n fd = fp.fileno()\n except:\n fd = -1\n\n im.encoderconfig = (\n offset,\n tile_offset,\n tile_size,\n quality_mode,\n quality_layers,\n num_resolutions,\n cblk_size,\n precinct_size,\n irreversible,\n progression,\n cinema_mode,\n fd\n )\n\n ImageFile._save(im, fp, [('jpeg2k', (0, 0)+im.size, 0, kind)])\n\n# ------------------------------------------------------------\n# Registry stuff\n\nImage.register_open('JPEG2000', Jpeg2KImageFile, _accept)\nImage.register_save('JPEG2000', _save)\n\nImage.register_extension('JPEG2000', '.jp2')\nImage.register_extension('JPEG2000', '.j2k')\nImage.register_extension('JPEG2000', '.jpc')\nImage.register_extension('JPEG2000', '.jpf')\nImage.register_extension('JPEG2000', '.jpx')\nImage.register_extension('JPEG2000', '.j2c')\n\nImage.register_mime('JPEG2000', 'image/jp2')\nImage.register_mime('JPEG2000', 'image/jpx')\n", "path": "PIL/Jpeg2KImagePlugin.py"}]}
3,494
674
gh_patches_debug_23568
rasdani/github-patches
git_diff
mitmproxy__mitmproxy-2921
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Clean up dependencies Spring cleaning! We currently declare some dependencies which are either unused or can easily be substituted: - h11 - not used at all? - requests - tests + examples only. We should IMHO also eventually consider removing the following dependencies, although that involves a bit of work and shouldn't be in scope for this issue: - pyasn1 - replace with asn1crypto, which is used by cryptography/pyOpenSSL - ldap3 - only used for ldap proxy auth, which should probably live outside of the core once we have a healthy addon system. </issue> <code> [start of setup.py] 1 import os 2 from codecs import open 3 4 import re 5 from setuptools import setup, find_packages 6 7 # Based on https://github.com/pypa/sampleproject/blob/master/setup.py 8 # and https://python-packaging-user-guide.readthedocs.org/ 9 10 here = os.path.abspath(os.path.dirname(__file__)) 11 12 with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f: 13 long_description = f.read() 14 15 with open(os.path.join(here, "mitmproxy", "version.py")) as f: 16 VERSION = re.search(r'VERSION = "(.+?)(?:-0x|")', f.read()).group(1) 17 18 setup( 19 name="mitmproxy", 20 version=VERSION, 21 description="An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.", 22 long_description=long_description, 23 url="http://mitmproxy.org", 24 author="Aldo Cortesi", 25 author_email="aldo@corte.si", 26 license="MIT", 27 classifiers=[ 28 "License :: OSI Approved :: MIT License", 29 "Development Status :: 5 - Production/Stable", 30 "Environment :: Console", 31 "Environment :: Console :: Curses", 32 "Operating System :: MacOS :: MacOS X", 33 "Operating System :: POSIX", 34 "Operating System :: Microsoft :: Windows", 35 "Programming Language :: Python", 36 "Programming Language :: Python :: 3", 37 "Programming Language :: Python :: 3 :: Only", 38 "Programming Language :: Python :: 3.5", 39 "Programming Language :: Python :: 3.6", 40 "Programming Language :: Python :: Implementation :: CPython", 41 "Topic :: Security", 42 "Topic :: Internet", 43 "Topic :: Internet :: WWW/HTTP", 44 "Topic :: Internet :: Proxy Servers", 45 "Topic :: Software Development :: Testing" 46 ], 47 packages=find_packages(include=[ 48 "mitmproxy", "mitmproxy.*", 49 "pathod", "pathod.*", 50 ]), 51 include_package_data=True, 52 entry_points={ 53 'console_scripts': [ 54 "mitmproxy = mitmproxy.tools.main:mitmproxy", 55 "mitmdump = mitmproxy.tools.main:mitmdump", 56 "mitmweb = mitmproxy.tools.main:mitmweb", 57 "pathod = pathod.pathod_cmdline:go_pathod", 58 "pathoc = pathod.pathoc_cmdline:go_pathoc" 59 ] 60 }, 61 # https://packaging.python.org/en/latest/requirements/#install-requires 62 # It is not considered best practice to use install_requires to pin dependencies to specific versions. 63 install_requires=[ 64 "blinker>=1.4, <1.5", 65 "brotlipy>=0.7.0,<0.8", 66 "certifi>=2015.11.20.1", # no semver here - this should always be on the last release! 
67 "click>=6.2, <7", 68 "cryptography>=2.1.4,<2.2", 69 'h11>=0.7.0,<0.8', 70 "h2>=3.0.1,<4", 71 "hyperframe>=5.1.0,<6", 72 "kaitaistruct>=0.7,<0.9", 73 "ldap3>=2.4,<2.5", 74 "passlib>=1.6.5, <1.8", 75 "pyasn1>=0.3.1,<0.5", 76 "pyOpenSSL>=17.5,<17.6", 77 "pyparsing>=2.1.3, <2.3", 78 "pyperclip>=1.6.0, <1.7", 79 "requests>=2.9.1, <3", 80 "ruamel.yaml>=0.13.2, <0.16", 81 "sortedcontainers>=1.5.4, <1.6", 82 "tornado>=4.3, <4.6", 83 "urwid>=2.0.1,<2.1", 84 "wsproto>=0.11.0,<0.12.0", 85 ], 86 extras_require={ 87 ':sys_platform == "win32"': [ 88 "pydivert>=2.0.3,<2.2", 89 ], 90 'dev': [ 91 "flake8>=3.5, <3.6", 92 "Flask>=0.10.1, <0.13", 93 "mypy>=0.560,<0.561", 94 "pytest-cov>=2.5.1,<3", 95 "pytest-faulthandler>=1.3.1,<2", 96 "pytest-timeout>=1.2.1,<2", 97 "pytest-xdist>=1.22,<2", 98 "pytest>=3.3,<4", 99 "tox>=2.3, <3", 100 "rstcheck>=2.2, <4.0", 101 ], 102 'examples': [ 103 "beautifulsoup4>=4.4.1, <4.7", 104 "Pillow>=4.3,<5.1", 105 ] 106 } 107 ) 108 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -66,7 +66,6 @@ "certifi>=2015.11.20.1", # no semver here - this should always be on the last release! "click>=6.2, <7", "cryptography>=2.1.4,<2.2", - 'h11>=0.7.0,<0.8', "h2>=3.0.1,<4", "hyperframe>=5.1.0,<6", "kaitaistruct>=0.7,<0.9", @@ -76,7 +75,6 @@ "pyOpenSSL>=17.5,<17.6", "pyparsing>=2.1.3, <2.3", "pyperclip>=1.6.0, <1.7", - "requests>=2.9.1, <3", "ruamel.yaml>=0.13.2, <0.16", "sortedcontainers>=1.5.4, <1.6", "tornado>=4.3, <4.6", @@ -96,6 +94,7 @@ "pytest-timeout>=1.2.1,<2", "pytest-xdist>=1.22,<2", "pytest>=3.3,<4", + "requests>=2.9.1, <3", "tox>=2.3, <3", "rstcheck>=2.2, <4.0", ],
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -66,7 +66,6 @@\n \"certifi>=2015.11.20.1\", # no semver here - this should always be on the last release!\n \"click>=6.2, <7\",\n \"cryptography>=2.1.4,<2.2\",\n- 'h11>=0.7.0,<0.8',\n \"h2>=3.0.1,<4\",\n \"hyperframe>=5.1.0,<6\",\n \"kaitaistruct>=0.7,<0.9\",\n@@ -76,7 +75,6 @@\n \"pyOpenSSL>=17.5,<17.6\",\n \"pyparsing>=2.1.3, <2.3\",\n \"pyperclip>=1.6.0, <1.7\",\n- \"requests>=2.9.1, <3\",\n \"ruamel.yaml>=0.13.2, <0.16\",\n \"sortedcontainers>=1.5.4, <1.6\",\n \"tornado>=4.3, <4.6\",\n@@ -96,6 +94,7 @@\n \"pytest-timeout>=1.2.1,<2\",\n \"pytest-xdist>=1.22,<2\",\n \"pytest>=3.3,<4\",\n+ \"requests>=2.9.1, <3\",\n \"tox>=2.3, <3\",\n \"rstcheck>=2.2, <4.0\",\n ],\n", "issue": "Clean up dependencies\nSpring cleaning! We currently declare some dependencies which are either unused or can easily be substituted:\r\n\r\n - h11 - not used at all?\r\n - requests - tests + examples only.\r\n\r\nWe should IMHO also eventually consider removing the following dependencies, although that involves a bit of work and shouldn't be in scope for this issue:\r\n\r\n - pyasn1 - replace with asn1crypto, which is used by cryptography/pyOpenSSL\r\n - ldap3 - only used for ldap proxy auth, which should probably live outside of the core once we have a healthy addon system.\n", "before_files": [{"content": "import os\nfrom codecs import open\n\nimport re\nfrom setuptools import setup, find_packages\n\n# Based on https://github.com/pypa/sampleproject/blob/master/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nwith open(os.path.join(here, \"mitmproxy\", \"version.py\")) as f:\n VERSION = re.search(r'VERSION = \"(.+?)(?:-0x|\")', f.read()).group(1)\n\nsetup(\n name=\"mitmproxy\",\n version=VERSION,\n description=\"An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.\",\n long_description=long_description,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"aldo@corte.si\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Security\",\n \"Topic :: Internet\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: Software Development :: Testing\"\n ],\n packages=find_packages(include=[\n \"mitmproxy\", \"mitmproxy.*\",\n \"pathod\", \"pathod.*\",\n ]),\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n \"mitmproxy = mitmproxy.tools.main:mitmproxy\",\n \"mitmdump = mitmproxy.tools.main:mitmdump\",\n \"mitmweb = mitmproxy.tools.main:mitmweb\",\n \"pathod = pathod.pathod_cmdline:go_pathod\",\n \"pathoc = pathod.pathoc_cmdline:go_pathoc\"\n ]\n },\n # https://packaging.python.org/en/latest/requirements/#install-requires\n # It is not considered best practice to 
use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"blinker>=1.4, <1.5\",\n \"brotlipy>=0.7.0,<0.8\",\n \"certifi>=2015.11.20.1\", # no semver here - this should always be on the last release!\n \"click>=6.2, <7\",\n \"cryptography>=2.1.4,<2.2\",\n 'h11>=0.7.0,<0.8',\n \"h2>=3.0.1,<4\",\n \"hyperframe>=5.1.0,<6\",\n \"kaitaistruct>=0.7,<0.9\",\n \"ldap3>=2.4,<2.5\",\n \"passlib>=1.6.5, <1.8\",\n \"pyasn1>=0.3.1,<0.5\",\n \"pyOpenSSL>=17.5,<17.6\",\n \"pyparsing>=2.1.3, <2.3\",\n \"pyperclip>=1.6.0, <1.7\",\n \"requests>=2.9.1, <3\",\n \"ruamel.yaml>=0.13.2, <0.16\",\n \"sortedcontainers>=1.5.4, <1.6\",\n \"tornado>=4.3, <4.6\",\n \"urwid>=2.0.1,<2.1\",\n \"wsproto>=0.11.0,<0.12.0\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=2.0.3,<2.2\",\n ],\n 'dev': [\n \"flake8>=3.5, <3.6\",\n \"Flask>=0.10.1, <0.13\",\n \"mypy>=0.560,<0.561\",\n \"pytest-cov>=2.5.1,<3\",\n \"pytest-faulthandler>=1.3.1,<2\",\n \"pytest-timeout>=1.2.1,<2\",\n \"pytest-xdist>=1.22,<2\",\n \"pytest>=3.3,<4\",\n \"tox>=2.3, <3\",\n \"rstcheck>=2.2, <4.0\",\n ],\n 'examples': [\n \"beautifulsoup4>=4.4.1, <4.7\",\n \"Pillow>=4.3,<5.1\",\n ]\n }\n)\n", "path": "setup.py"}]}
1,991
367
gh_patches_debug_33273
rasdani/github-patches
git_diff
GeotrekCE__Geotrek-admin-1377
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Should not disable edit button if having bypass structure permission Workaround: write url by hand (eg. "/trek/edit/1/"). </issue> <code> [start of geotrek/authent/models.py] 1 # -*- coding: utf-8 -*- 2 3 """ 4 Models to manage users and profiles 5 """ 6 from django.db import models 7 from django.contrib.auth.models import User 8 from django.conf import settings 9 from django.utils.translation import ugettext_lazy as _ 10 from django.dispatch import receiver 11 from django.contrib.auth.signals import user_logged_in 12 13 from geotrek.common.utils import reify 14 15 16 class Structure(models.Model): 17 """ 18 Represents an organisational structure, to which users are related. 19 """ 20 name = models.CharField(max_length=256, verbose_name=_(u"Nom")) 21 22 def __unicode__(self): 23 return self.name 24 25 class Meta: 26 verbose_name = _(u"Structure") 27 verbose_name_plural = _(u"Structures") 28 ordering = ['name'] 29 permissions = (("can_bypass_structure", _("Can by structure")),) 30 31 32 def default_structure(): 33 """ Create default structure if necessary """ 34 return Structure.objects.get_or_create(name=settings.DEFAULT_STRUCTURE_NAME)[0] 35 36 37 class StructureRelatedQuerySet(models.query.QuerySet): 38 def for_user(self, user): 39 return StructureRelatedQuerySet.queryset_for_user(self, user) 40 41 @staticmethod 42 def queryset_for_user(queryset, user): 43 return queryset.filter(structure=user.profile.structure) 44 45 46 class StructureRelatedManager(models.Manager): 47 """ A simple manager to manage structure related objects""" 48 def get_queryset(self): 49 return StructureRelatedQuerySet(self.model, using=self._db) 50 51 def for_user(self, user): 52 """ Filter by user's structure """ 53 return self.get_queryset().for_user(user) 54 55 56 class StructureRelated(models.Model): 57 """ 58 A mixin used for any entities that belong to a structure 59 """ 60 structure = models.ForeignKey(Structure, default=default_structure, 61 verbose_name=_(u"Related structure"), db_column='structure') 62 63 objects = models.Manager() 64 in_structure = StructureRelatedManager() 65 66 @classmethod 67 def for_user(cls, user): 68 """ Shortcut to manager's filter by user """ 69 return cls.in_structure.for_user(user) 70 71 def same_structure(self, user): 72 """ Returns True if the user is in the same structure, False otherwise. """ 73 return user.profile.structure == self.structure 74 75 class Meta: 76 abstract = True 77 verbose_name = _(u"Related structures") 78 verbose_name_plural = _(u"Related structure") 79 80 81 class UserProfile(StructureRelated): 82 """ 83 A custom user profile 84 """ 85 user = models.OneToOneField(User, unique=True) 86 87 language = models.CharField(_(u"Language"), max_length=10, 88 choices=settings.LANGUAGES, 89 default=settings.LANGUAGE_CODE) 90 91 class Meta: 92 verbose_name = _(u"User's profile") 93 verbose_name_plural = _(u"User's profiles") 94 95 def __unicode__(self): 96 return _("Profile for %s") % self.user 97 98 User.profile = reify(lambda u: UserProfile.objects.get_or_create(user=u)[0]) 99 100 101 @receiver(user_logged_in) 102 def lang(sender, **kwargs): 103 """ Set user's language in session when he logs in. 
""" 104 lang_code = kwargs['user'].profile.language 105 kwargs['request'].session['django_language'] = lang_code 106 [end of geotrek/authent/models.py] [start of geotrek/authent/decorators.py] 1 from functools import wraps 2 3 from django.shortcuts import redirect 4 from django.contrib import messages 5 from django.http import HttpResponseRedirect 6 from django.utils.translation import ugettext_lazy as _ 7 from django.utils.decorators import available_attrs 8 9 10 def same_structure_required(redirect_to): 11 """ 12 A decorator for class-based views. It relies on ``self.get_object()`` 13 method object, and assumes decorated views to handle ``StructureRelated`` 14 objects. 15 """ 16 def decorator(view_func): 17 @wraps(view_func, assigned=available_attrs(view_func)) 18 def _wrapped_view(self, request, *args, **kwargs): 19 result = view_func(self, request, *args, **kwargs) 20 21 # Superuser is always allowed 22 if request.user.is_superuser: 23 return result 24 25 if isinstance(result, HttpResponseRedirect): 26 return result 27 28 can_bypass_structure = request.user.has_perm('can_bypass_structure') 29 obj = hasattr(self, 'get_object') and self.get_object() or getattr(self, 'object', None) 30 if can_bypass_structure or (obj and obj.same_structure(request.user)): 31 return result 32 messages.warning(request, _(u'Access to the requested resource is restricted by structure. You have been redirected.')) 33 34 return redirect(redirect_to, *args, **kwargs) 35 return _wrapped_view 36 return decorator 37 [end of geotrek/authent/decorators.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/geotrek/authent/decorators.py b/geotrek/authent/decorators.py --- a/geotrek/authent/decorators.py +++ b/geotrek/authent/decorators.py @@ -18,16 +18,11 @@ def _wrapped_view(self, request, *args, **kwargs): result = view_func(self, request, *args, **kwargs) - # Superuser is always allowed - if request.user.is_superuser: - return result - if isinstance(result, HttpResponseRedirect): return result - can_bypass_structure = request.user.has_perm('can_bypass_structure') obj = hasattr(self, 'get_object') and self.get_object() or getattr(self, 'object', None) - if can_bypass_structure or (obj and obj.same_structure(request.user)): + if obj.same_structure(request.user): return result messages.warning(request, _(u'Access to the requested resource is restricted by structure. You have been redirected.')) diff --git a/geotrek/authent/models.py b/geotrek/authent/models.py --- a/geotrek/authent/models.py +++ b/geotrek/authent/models.py @@ -26,7 +26,7 @@ verbose_name = _(u"Structure") verbose_name_plural = _(u"Structures") ordering = ['name'] - permissions = (("can_bypass_structure", _("Can by structure")),) + permissions = (("can_bypass_structure", _("Can bypass structure")),) def default_structure(): @@ -69,8 +69,11 @@ return cls.in_structure.for_user(user) def same_structure(self, user): - """ Returns True if the user is in the same structure, False otherwise. """ - return user.profile.structure == self.structure + """ Returns True if the user is in the same structure or has + bypass_structure permission, False otherwise. """ + return (user.profile.structure == self.structure or + user.is_superuser or + user.has_perm('authent.can_bypass_structure')) class Meta: abstract = True
{"golden_diff": "diff --git a/geotrek/authent/decorators.py b/geotrek/authent/decorators.py\n--- a/geotrek/authent/decorators.py\n+++ b/geotrek/authent/decorators.py\n@@ -18,16 +18,11 @@\n def _wrapped_view(self, request, *args, **kwargs):\n result = view_func(self, request, *args, **kwargs)\n \n- # Superuser is always allowed\n- if request.user.is_superuser:\n- return result\n-\n if isinstance(result, HttpResponseRedirect):\n return result\n \n- can_bypass_structure = request.user.has_perm('can_bypass_structure')\n obj = hasattr(self, 'get_object') and self.get_object() or getattr(self, 'object', None)\n- if can_bypass_structure or (obj and obj.same_structure(request.user)):\n+ if obj.same_structure(request.user):\n return result\n messages.warning(request, _(u'Access to the requested resource is restricted by structure. You have been redirected.'))\n \ndiff --git a/geotrek/authent/models.py b/geotrek/authent/models.py\n--- a/geotrek/authent/models.py\n+++ b/geotrek/authent/models.py\n@@ -26,7 +26,7 @@\n verbose_name = _(u\"Structure\")\n verbose_name_plural = _(u\"Structures\")\n ordering = ['name']\n- permissions = ((\"can_bypass_structure\", _(\"Can by structure\")),)\n+ permissions = ((\"can_bypass_structure\", _(\"Can bypass structure\")),)\n \n \n def default_structure():\n@@ -69,8 +69,11 @@\n return cls.in_structure.for_user(user)\n \n def same_structure(self, user):\n- \"\"\" Returns True if the user is in the same structure, False otherwise. \"\"\"\n- return user.profile.structure == self.structure\n+ \"\"\" Returns True if the user is in the same structure or has\n+ bypass_structure permission, False otherwise. \"\"\"\n+ return (user.profile.structure == self.structure or\n+ user.is_superuser or\n+ user.has_perm('authent.can_bypass_structure'))\n \n class Meta:\n abstract = True\n", "issue": "Should not disable edit button if having bypass structure permission\nWorkaround: write url by hand (eg. 
\"/trek/edit/1/\").\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\n Models to manage users and profiles\n\"\"\"\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.dispatch import receiver\nfrom django.contrib.auth.signals import user_logged_in\n\nfrom geotrek.common.utils import reify\n\n\nclass Structure(models.Model):\n \"\"\"\n Represents an organisational structure, to which users are related.\n \"\"\"\n name = models.CharField(max_length=256, verbose_name=_(u\"Nom\"))\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n verbose_name = _(u\"Structure\")\n verbose_name_plural = _(u\"Structures\")\n ordering = ['name']\n permissions = ((\"can_bypass_structure\", _(\"Can by structure\")),)\n\n\ndef default_structure():\n \"\"\" Create default structure if necessary \"\"\"\n return Structure.objects.get_or_create(name=settings.DEFAULT_STRUCTURE_NAME)[0]\n\n\nclass StructureRelatedQuerySet(models.query.QuerySet):\n def for_user(self, user):\n return StructureRelatedQuerySet.queryset_for_user(self, user)\n\n @staticmethod\n def queryset_for_user(queryset, user):\n return queryset.filter(structure=user.profile.structure)\n\n\nclass StructureRelatedManager(models.Manager):\n \"\"\" A simple manager to manage structure related objects\"\"\"\n def get_queryset(self):\n return StructureRelatedQuerySet(self.model, using=self._db)\n\n def for_user(self, user):\n \"\"\" Filter by user's structure \"\"\"\n return self.get_queryset().for_user(user)\n\n\nclass StructureRelated(models.Model):\n \"\"\"\n A mixin used for any entities that belong to a structure\n \"\"\"\n structure = models.ForeignKey(Structure, default=default_structure,\n verbose_name=_(u\"Related structure\"), db_column='structure')\n\n objects = models.Manager()\n in_structure = StructureRelatedManager()\n\n @classmethod\n def for_user(cls, user):\n \"\"\" Shortcut to manager's filter by user \"\"\"\n return cls.in_structure.for_user(user)\n\n def same_structure(self, user):\n \"\"\" Returns True if the user is in the same structure, False otherwise. \"\"\"\n return user.profile.structure == self.structure\n\n class Meta:\n abstract = True\n verbose_name = _(u\"Related structures\")\n verbose_name_plural = _(u\"Related structure\")\n\n\nclass UserProfile(StructureRelated):\n \"\"\"\n A custom user profile\n \"\"\"\n user = models.OneToOneField(User, unique=True)\n\n language = models.CharField(_(u\"Language\"), max_length=10,\n choices=settings.LANGUAGES,\n default=settings.LANGUAGE_CODE)\n\n class Meta:\n verbose_name = _(u\"User's profile\")\n verbose_name_plural = _(u\"User's profiles\")\n\n def __unicode__(self):\n return _(\"Profile for %s\") % self.user\n\nUser.profile = reify(lambda u: UserProfile.objects.get_or_create(user=u)[0])\n\n\n@receiver(user_logged_in)\ndef lang(sender, **kwargs):\n \"\"\" Set user's language in session when he logs in. \"\"\"\n lang_code = kwargs['user'].profile.language\n kwargs['request'].session['django_language'] = lang_code\n", "path": "geotrek/authent/models.py"}, {"content": "from functools import wraps\n\nfrom django.shortcuts import redirect\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.decorators import available_attrs\n\n\ndef same_structure_required(redirect_to):\n \"\"\"\n A decorator for class-based views. 
It relies on ``self.get_object()``\n method object, and assumes decorated views to handle ``StructureRelated``\n objects.\n \"\"\"\n def decorator(view_func):\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(self, request, *args, **kwargs):\n result = view_func(self, request, *args, **kwargs)\n\n # Superuser is always allowed\n if request.user.is_superuser:\n return result\n\n if isinstance(result, HttpResponseRedirect):\n return result\n\n can_bypass_structure = request.user.has_perm('can_bypass_structure')\n obj = hasattr(self, 'get_object') and self.get_object() or getattr(self, 'object', None)\n if can_bypass_structure or (obj and obj.same_structure(request.user)):\n return result\n messages.warning(request, _(u'Access to the requested resource is restricted by structure. You have been redirected.'))\n\n return redirect(redirect_to, *args, **kwargs)\n return _wrapped_view\n return decorator\n", "path": "geotrek/authent/decorators.py"}]}
1,831
465
gh_patches_debug_19323
rasdani/github-patches
git_diff
PokemonGoF__PokemonGo-Bot-5036
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Crash on Level Up I'm gonna guess an issue with: https://github.com/PokemonGoF/PokemonGo-Bot/pull/5016 which is also the version im on ``` Traceback (most recent call last): File "pokecli.py", line 781, in <module> main() File "pokecli.py", line 139, in main bot.tick() File "C:\Users\Steve\Downloads\PokemonGo-Bot\pokemongo_bot\__init__.py", line 658, in tick if worker.work() == WorkerResult.RUNNING: File "C:\Users\Steve\Downloads\PokemonGo-Bot\pokemongo_bot\cell_workers\collect_level_up_reward.py", line 37, in work self._collect_level_reward() File "C:\Users\Steve\Downloads\PokemonGo-Bot\pokemongo_bot\cell_workers\collect_level_up_reward.py", line 70, in _collect_level_reward 'items': ', '.join(["{}x {}".format(data[x], x) for x in data]) TypeError: list indices must be integers, not dict ``` </issue> <code> [start of pokemongo_bot/cell_workers/collect_level_up_reward.py] 1 import sys 2 3 from pokemongo_bot.base_task import BaseTask 4 from pokemongo_bot import inventory 5 6 7 class CollectLevelUpReward(BaseTask): 8 SUPPORTED_TASK_API_VERSION = 1 9 10 current_level = 0 11 previous_level = 0 12 13 def initialize(self): 14 self._process_config() 15 self.current_level = inventory.player().level 16 self.previous_level = 0 17 18 def work(self): 19 if self._should_run(): 20 self.current_level = inventory.player().level 21 22 if self.collect_reward: 23 # let's check level reward on bot initialization 24 # to be able get rewards for old bots 25 if self.previous_level == 0: 26 self._collect_level_reward() 27 # level up situation 28 elif self.current_level > self.previous_level: 29 self.emit_event( 30 'level_up', 31 formatted='Level up from {previous_level} to {current_level}', 32 data={ 33 'previous_level': self.previous_level, 34 'current_level': self.current_level 35 } 36 ) 37 self._collect_level_reward() 38 39 if self.level_limit != -1 and self.current_level >= self.level_limit: 40 sys.exit("You have reached your target level! Exiting now.") 41 42 self.previous_level = self.current_level 43 44 def _process_config(self): 45 self.level_limit = self.config.get('level_limit', -1) 46 self.collect_reward = self.config.get('collect_reward', True) 47 48 def _should_run(self): 49 return self.level_limit != -1 or self.collect_reward 50 51 def _collect_level_reward(self): 52 response_dict = self.bot.api.level_up_rewards(level=self.current_level) 53 if 'status_code' in response_dict and response_dict['status_code'] == 1: 54 data = (response_dict 55 .get('responses', {}) 56 .get('LEVEL_UP_REWARDS', {}) 57 .get('items_awarded', [])) 58 59 for item in data: 60 if 'item_id' in item and str(item['item_id']) in self.bot.item_list: 61 got_item = self.bot.item_list[str(item['item_id'])] 62 item['name'] = got_item 63 count = 'item_count' in item and item['item_count'] or 0 64 inventory.items().get(item['item_id']).add(count) 65 try: 66 self.emit_event( 67 'level_up_reward', 68 formatted='Received level up reward: {items}', 69 data={ 70 'items': ', '.join(["{}x {}".format(data[x], x) for x in data]) 71 } 72 ) 73 except TypeError: 74 pass 75 [end of pokemongo_bot/cell_workers/collect_level_up_reward.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pokemongo_bot/cell_workers/collect_level_up_reward.py b/pokemongo_bot/cell_workers/collect_level_up_reward.py --- a/pokemongo_bot/cell_workers/collect_level_up_reward.py +++ b/pokemongo_bot/cell_workers/collect_level_up_reward.py @@ -62,13 +62,11 @@ item['name'] = got_item count = 'item_count' in item and item['item_count'] or 0 inventory.items().get(item['item_id']).add(count) - try: - self.emit_event( - 'level_up_reward', - formatted='Received level up reward: {items}', - data={ - 'items': ', '.join(["{}x {}".format(data[x], x) for x in data]) - } - ) - except TypeError: - pass + self.emit_event( + 'level_up_reward', + formatted='Received level up reward: {items}', + data={ + # [{'item_id': 3, 'name': u'Ultraball', 'item_count': 10}, {'item_id': 103, 'name': u'Hyper Potion', 'item_count': 10}] + 'items': ', '.join(["{}x {}".format(x['item_count'], x['name']) for x in data]) + } + )
{"golden_diff": "diff --git a/pokemongo_bot/cell_workers/collect_level_up_reward.py b/pokemongo_bot/cell_workers/collect_level_up_reward.py\n--- a/pokemongo_bot/cell_workers/collect_level_up_reward.py\n+++ b/pokemongo_bot/cell_workers/collect_level_up_reward.py\n@@ -62,13 +62,11 @@\n item['name'] = got_item\n count = 'item_count' in item and item['item_count'] or 0\n inventory.items().get(item['item_id']).add(count)\n- try:\n- self.emit_event(\n- 'level_up_reward',\n- formatted='Received level up reward: {items}',\n- data={\n- 'items': ', '.join([\"{}x {}\".format(data[x], x) for x in data])\n- }\n- )\n- except TypeError:\n- pass\n+ self.emit_event(\n+ 'level_up_reward',\n+ formatted='Received level up reward: {items}',\n+ data={\n+ # [{'item_id': 3, 'name': u'Ultraball', 'item_count': 10}, {'item_id': 103, 'name': u'Hyper Potion', 'item_count': 10}]\n+ 'items': ', '.join([\"{}x {}\".format(x['item_count'], x['name']) for x in data])\n+ }\n+ )\n", "issue": "Crash on Level Up\nI'm gonna guess an issue with:\nhttps://github.com/PokemonGoF/PokemonGo-Bot/pull/5016\n\nwhich is also the version im on\n\n```\nTraceback (most recent call last):\n File \"pokecli.py\", line 781, in <module>\n main()\n File \"pokecli.py\", line 139, in main\n bot.tick()\n File \"C:\\Users\\Steve\\Downloads\\PokemonGo-Bot\\pokemongo_bot\\__init__.py\", line 658, in tick\n if worker.work() == WorkerResult.RUNNING:\n File \"C:\\Users\\Steve\\Downloads\\PokemonGo-Bot\\pokemongo_bot\\cell_workers\\collect_level_up_reward.py\", line 37, in work\n self._collect_level_reward()\n File \"C:\\Users\\Steve\\Downloads\\PokemonGo-Bot\\pokemongo_bot\\cell_workers\\collect_level_up_reward.py\", line 70, in _collect_level_reward\n 'items': ', '.join([\"{}x {}\".format(data[x], x) for x in data])\nTypeError: list indices must be integers, not dict\n```\n\n", "before_files": [{"content": "import sys\n\nfrom pokemongo_bot.base_task import BaseTask\nfrom pokemongo_bot import inventory\n\n\nclass CollectLevelUpReward(BaseTask):\n SUPPORTED_TASK_API_VERSION = 1\n\n current_level = 0\n previous_level = 0\n\n def initialize(self):\n self._process_config()\n self.current_level = inventory.player().level\n self.previous_level = 0\n\n def work(self):\n if self._should_run():\n self.current_level = inventory.player().level\n\n if self.collect_reward:\n # let's check level reward on bot initialization\n # to be able get rewards for old bots\n if self.previous_level == 0:\n self._collect_level_reward()\n # level up situation\n elif self.current_level > self.previous_level:\n self.emit_event(\n 'level_up',\n formatted='Level up from {previous_level} to {current_level}',\n data={\n 'previous_level': self.previous_level,\n 'current_level': self.current_level\n }\n )\n self._collect_level_reward()\n\n if self.level_limit != -1 and self.current_level >= self.level_limit:\n sys.exit(\"You have reached your target level! 
Exiting now.\")\n\n self.previous_level = self.current_level\n\n def _process_config(self):\n self.level_limit = self.config.get('level_limit', -1)\n self.collect_reward = self.config.get('collect_reward', True)\n\n def _should_run(self):\n return self.level_limit != -1 or self.collect_reward\n\n def _collect_level_reward(self):\n response_dict = self.bot.api.level_up_rewards(level=self.current_level)\n if 'status_code' in response_dict and response_dict['status_code'] == 1:\n data = (response_dict\n .get('responses', {})\n .get('LEVEL_UP_REWARDS', {})\n .get('items_awarded', []))\n\n for item in data:\n if 'item_id' in item and str(item['item_id']) in self.bot.item_list:\n got_item = self.bot.item_list[str(item['item_id'])]\n item['name'] = got_item\n count = 'item_count' in item and item['item_count'] or 0\n inventory.items().get(item['item_id']).add(count)\n try:\n self.emit_event(\n 'level_up_reward',\n formatted='Received level up reward: {items}',\n data={\n 'items': ', '.join([\"{}x {}\".format(data[x], x) for x in data])\n }\n )\n except TypeError:\n pass\n", "path": "pokemongo_bot/cell_workers/collect_level_up_reward.py"}]}
1,507
309
gh_patches_debug_43038
rasdani/github-patches
git_diff
pantsbuild__pants-16977
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support `parametrize(...)` assignments in the `__defaults__` macro **Is your feature request related to a problem? Please describe.** The `__defaults__` BUILD file construct helps eliminate the need for boilerplate target field assignments across an entire directory tree. However, it does not support assigning the result of the `parametrize` macro to a field. ``` # BUILD __defaults__({ python_sources: dict(resolve=parametrize(my_resolves())), # InvalidFieldTypeException: expects string, not Parametrize }) ``` **Describe the solution you'd like** Support `parametrize` in this context in the natural way, i.e. interpret applicable targets as having the field assigned to `parametrize(...)`. **Describe alternatives you've considered** Modifying `tailor` to populate this field on a per-directory basis might work. **Additional context** This is very useful when a single Python subproject is intended to run under several lockfiles. </issue> <code> [start of src/python/pants/engine/internals/defaults.py] 1 # Copyright 2022 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 """The `BuildFileDefaultsParserState.set_defaults` is used by the pants.engine.internals.Parser, 4 exposed as the `__defaults__` BUILD file symbol. 5 6 When parsing a BUILD (from the rule `pants.engine.internals.build_files.parse_address_family`) the 7 defaults from the closest parent BUILD file is passed as input to the parser, and the new defaults 8 resulting after the BUILD file have been parsed is returned in the `AddressFamily`. 9 10 These defaults are then applied when creating the `TargetAdaptor` targets by the `Registrar` in the 11 parser. 
12 """ 13 from __future__ import annotations 14 15 from dataclasses import dataclass 16 from typing import Any, Iterable, Mapping, Tuple, Union 17 18 from pants.engine.addresses import Address 19 from pants.engine.target import ( 20 Field, 21 ImmutableValue, 22 InvalidFieldException, 23 RegisteredTargetTypes, 24 Target, 25 TargetGenerator, 26 ) 27 from pants.engine.unions import UnionMembership 28 from pants.util.frozendict import FrozenDict 29 30 SetDefaultsValueT = Mapping[str, Any] 31 SetDefaultsKeyT = Union[str, Tuple[str, ...]] 32 SetDefaultsT = Mapping[SetDefaultsKeyT, SetDefaultsValueT] 33 34 35 class BuildFileDefaults(FrozenDict[str, FrozenDict[str, ImmutableValue]]): 36 """Map target types to default field values.""" 37 38 39 @dataclass 40 class BuildFileDefaultsParserState: 41 address: Address 42 defaults: dict[str, Mapping[str, Any]] 43 registered_target_types: RegisteredTargetTypes 44 union_membership: UnionMembership 45 46 @classmethod 47 def create( 48 cls, 49 path: str, 50 defaults: BuildFileDefaults, 51 registered_target_types: RegisteredTargetTypes, 52 union_membership: UnionMembership, 53 ) -> BuildFileDefaultsParserState: 54 return cls( 55 address=Address(path, generated_name="__defaults__"), 56 defaults=dict(defaults), 57 registered_target_types=registered_target_types, 58 union_membership=union_membership, 59 ) 60 61 def get_frozen_defaults(self) -> BuildFileDefaults: 62 types = self.registered_target_types.aliases_to_types 63 return BuildFileDefaults( 64 { 65 target_alias: FrozenDict( 66 { 67 field_type.alias: field_type.compute_value( 68 raw_value=default, address=self.address 69 ) 70 for field_alias, default in fields.items() 71 for field_type in self._target_type_field_types(types[target_alias]) 72 if field_alias in (field_type.alias, field_type.deprecated_alias) 73 } 74 ) 75 for target_alias, fields in self.defaults.items() 76 } 77 ) 78 79 def get(self, target_alias: str) -> Mapping[str, Any]: 80 # Used by `pants.engine.internals.parser.Parser._generate_symbols.Registrar.__call__` 81 return self.defaults.get(target_alias, {}) 82 83 def set_defaults( 84 self, 85 *args: SetDefaultsT, 86 all: SetDefaultsValueT | None = None, 87 extend: bool = False, 88 **kwargs, 89 ) -> None: 90 defaults: dict[str, dict[str, Any]] = ( 91 {} if not extend else {k: dict(v) for k, v in self.defaults.items()} 92 ) 93 94 if all is not None: 95 self._process_defaults( 96 defaults, 97 {tuple(self.registered_target_types.aliases): all}, 98 ignore_unknown_fields=True, 99 ) 100 101 for arg in args: 102 self._process_defaults(defaults, arg) 103 104 # Update with new defaults, dropping targets without any default values. 105 for tgt, default in defaults.items(): 106 if not default: 107 self.defaults.pop(tgt, None) 108 else: 109 self.defaults[tgt] = default 110 111 def _target_type_field_types(self, target_type: type[Target]) -> tuple[type[Field], ...]: 112 return ( 113 *target_type.class_field_types(self.union_membership), 114 *(target_type.moved_fields if issubclass(target_type, TargetGenerator) else ()), 115 ) 116 117 def _process_defaults( 118 self, 119 defaults: dict[str, dict[str, Any]], 120 targets_defaults: SetDefaultsT, 121 ignore_unknown_fields: bool = False, 122 ): 123 if not isinstance(targets_defaults, dict): 124 raise ValueError( 125 f"Expected dictionary mapping targets to default field values for {self.address} " 126 f"but got: {type(targets_defaults).__name__}." 
127 ) 128 129 types = self.registered_target_types.aliases_to_types 130 for target, default in targets_defaults.items(): 131 if not isinstance(default, dict): 132 raise ValueError( 133 f"Invalid default field values in {self.address} for target type {target}, " 134 f"must be an `dict` but was {default!r} with type `{type(default).__name__}`." 135 ) 136 137 targets: Iterable[str] 138 targets = target if isinstance(target, tuple) else (target,) 139 for target_alias in map(str, targets): 140 if target_alias in types: 141 target_type = types[target_alias] 142 else: 143 raise ValueError(f"Unrecognized target type {target_alias} in {self.address}.") 144 145 # Copy default dict if we may mutate it. 146 raw_values = dict(default) if ignore_unknown_fields else default 147 148 # Validate that field exists on target 149 valid_field_aliases = set( 150 target_type._get_field_aliases_to_field_types( 151 self._target_type_field_types(target_type) 152 ).keys() 153 ) 154 155 for field_alias in default.keys(): 156 if field_alias not in valid_field_aliases: 157 if ignore_unknown_fields: 158 del raw_values[field_alias] 159 else: 160 raise InvalidFieldException( 161 f"Unrecognized field `{field_alias}` for target {target_type.alias}. " 162 f"Valid fields are: {', '.join(sorted(valid_field_aliases))}.", 163 ) 164 165 # TODO: support parametrization ? --needs special care due to Parametrize object not 166 # being hashable, and thus not acceptable in a FrozenDict instance. 167 168 # Merge all provided defaults for this call. 169 defaults.setdefault(target_type.alias, {}).update(raw_values) 170 [end of src/python/pants/engine/internals/defaults.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/python/pants/engine/internals/defaults.py b/src/python/pants/engine/internals/defaults.py --- a/src/python/pants/engine/internals/defaults.py +++ b/src/python/pants/engine/internals/defaults.py @@ -13,9 +13,10 @@ from __future__ import annotations from dataclasses import dataclass -from typing import Any, Iterable, Mapping, Tuple, Union +from typing import Any, Callable, Iterable, Mapping, Tuple, Union from pants.engine.addresses import Address +from pants.engine.internals.parametrize import Parametrize from pants.engine.target import ( Field, ImmutableValue, @@ -26,6 +27,7 @@ ) from pants.engine.unions import UnionMembership from pants.util.frozendict import FrozenDict +from pants.util.meta import frozen_after_init SetDefaultsValueT = Mapping[str, Any] SetDefaultsKeyT = Union[str, Tuple[str, ...]] @@ -36,6 +38,36 @@ """Map target types to default field values.""" +@frozen_after_init +@dataclass(unsafe_hash=True) +class ParametrizeDefault(Parametrize): + """A frozen version of `Parametrize` for defaults. + + This is needed since all defaults must be hashable, which the `Parametrize` class is not nor can + it be as it may get unhashable data as input and is unaware of the field type it is being + applied to. + """ + + args: tuple[str, ...] + kwargs: FrozenDict[str, ImmutableValue] # type: ignore[assignment] + + def __init__(self, *args: str, **kwargs: ImmutableValue) -> None: + self.args = args + self.kwargs = FrozenDict(kwargs) + + @classmethod + def create( + cls, freeze: Callable[[Any], ImmutableValue], parametrize: Parametrize + ) -> ParametrizeDefault: + return cls( + *map(freeze, parametrize.args), + **{kw: freeze(arg) for kw, arg in parametrize.kwargs.items()}, + ) + + def __repr__(self) -> str: + return super().__repr__() + + @dataclass class BuildFileDefaultsParserState: address: Address @@ -58,15 +90,25 @@ union_membership=union_membership, ) + def _freeze_field_value(self, field_type: type[Field], value: Any) -> ImmutableValue: + if isinstance(value, ParametrizeDefault): + return value + elif isinstance(value, Parametrize): + + def freeze(v: Any) -> ImmutableValue: + return self._freeze_field_value(field_type, v) + + return ParametrizeDefault.create(freeze, value) + else: + return field_type.compute_value(raw_value=value, address=self.address) + def get_frozen_defaults(self) -> BuildFileDefaults: types = self.registered_target_types.aliases_to_types return BuildFileDefaults( { target_alias: FrozenDict( { - field_type.alias: field_type.compute_value( - raw_value=default, address=self.address - ) + field_type.alias: self._freeze_field_value(field_type, default) for field_alias, default in fields.items() for field_type in self._target_type_field_types(types[target_alias]) if field_alias in (field_type.alias, field_type.deprecated_alias) @@ -162,8 +204,5 @@ f"Valid fields are: {', '.join(sorted(valid_field_aliases))}.", ) - # TODO: support parametrization ? --needs special care due to Parametrize object not - # being hashable, and thus not acceptable in a FrozenDict instance. - # Merge all provided defaults for this call. defaults.setdefault(target_type.alias, {}).update(raw_values)
{"golden_diff": "diff --git a/src/python/pants/engine/internals/defaults.py b/src/python/pants/engine/internals/defaults.py\n--- a/src/python/pants/engine/internals/defaults.py\n+++ b/src/python/pants/engine/internals/defaults.py\n@@ -13,9 +13,10 @@\n from __future__ import annotations\n \n from dataclasses import dataclass\n-from typing import Any, Iterable, Mapping, Tuple, Union\n+from typing import Any, Callable, Iterable, Mapping, Tuple, Union\n \n from pants.engine.addresses import Address\n+from pants.engine.internals.parametrize import Parametrize\n from pants.engine.target import (\n Field,\n ImmutableValue,\n@@ -26,6 +27,7 @@\n )\n from pants.engine.unions import UnionMembership\n from pants.util.frozendict import FrozenDict\n+from pants.util.meta import frozen_after_init\n \n SetDefaultsValueT = Mapping[str, Any]\n SetDefaultsKeyT = Union[str, Tuple[str, ...]]\n@@ -36,6 +38,36 @@\n \"\"\"Map target types to default field values.\"\"\"\n \n \n+@frozen_after_init\n+@dataclass(unsafe_hash=True)\n+class ParametrizeDefault(Parametrize):\n+ \"\"\"A frozen version of `Parametrize` for defaults.\n+\n+ This is needed since all defaults must be hashable, which the `Parametrize` class is not nor can\n+ it be as it may get unhashable data as input and is unaware of the field type it is being\n+ applied to.\n+ \"\"\"\n+\n+ args: tuple[str, ...]\n+ kwargs: FrozenDict[str, ImmutableValue] # type: ignore[assignment]\n+\n+ def __init__(self, *args: str, **kwargs: ImmutableValue) -> None:\n+ self.args = args\n+ self.kwargs = FrozenDict(kwargs)\n+\n+ @classmethod\n+ def create(\n+ cls, freeze: Callable[[Any], ImmutableValue], parametrize: Parametrize\n+ ) -> ParametrizeDefault:\n+ return cls(\n+ *map(freeze, parametrize.args),\n+ **{kw: freeze(arg) for kw, arg in parametrize.kwargs.items()},\n+ )\n+\n+ def __repr__(self) -> str:\n+ return super().__repr__()\n+\n+\n @dataclass\n class BuildFileDefaultsParserState:\n address: Address\n@@ -58,15 +90,25 @@\n union_membership=union_membership,\n )\n \n+ def _freeze_field_value(self, field_type: type[Field], value: Any) -> ImmutableValue:\n+ if isinstance(value, ParametrizeDefault):\n+ return value\n+ elif isinstance(value, Parametrize):\n+\n+ def freeze(v: Any) -> ImmutableValue:\n+ return self._freeze_field_value(field_type, v)\n+\n+ return ParametrizeDefault.create(freeze, value)\n+ else:\n+ return field_type.compute_value(raw_value=value, address=self.address)\n+\n def get_frozen_defaults(self) -> BuildFileDefaults:\n types = self.registered_target_types.aliases_to_types\n return BuildFileDefaults(\n {\n target_alias: FrozenDict(\n {\n- field_type.alias: field_type.compute_value(\n- raw_value=default, address=self.address\n- )\n+ field_type.alias: self._freeze_field_value(field_type, default)\n for field_alias, default in fields.items()\n for field_type in self._target_type_field_types(types[target_alias])\n if field_alias in (field_type.alias, field_type.deprecated_alias)\n@@ -162,8 +204,5 @@\n f\"Valid fields are: {', '.join(sorted(valid_field_aliases))}.\",\n )\n \n- # TODO: support parametrization ? --needs special care due to Parametrize object not\n- # being hashable, and thus not acceptable in a FrozenDict instance.\n-\n # Merge all provided defaults for this call.\n defaults.setdefault(target_type.alias, {}).update(raw_values)\n", "issue": "Support `parametrize(...)` assignments in the `__defaults__` macro\n**Is your feature request related to a problem? 
Please describe.**\r\n\r\nThe `__defaults__` BUILD file construct helps eliminate the need for boilerplate target field assignments across an entire\r\ndirectory tree. However, it does not support assigning the result of the `parametrize` macro to a field.\r\n\r\n```\r\n# BUILD\r\n\r\n__defaults__({\r\n python_sources: dict(resolve=parametrize(my_resolves())), # InvalidFieldTypeException: expects string, not Parametrize\r\n})\r\n```\r\n\r\n**Describe the solution you'd like**\r\n\r\nSupport `parametrize` in this context in the natural way, i.e. interpret applicable targets as having the field assigned to\r\n`parametrize(...)`.\r\n\r\n**Describe alternatives you've considered**\r\n\r\nModifying `tailor` to populate this field on a per-directory basis might work.\r\n\r\n**Additional context**\r\n\r\nThis is very useful when a single Python subproject is intended to run under several lockfiles.\n", "before_files": [{"content": "# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\"\"\"The `BuildFileDefaultsParserState.set_defaults` is used by the pants.engine.internals.Parser,\nexposed as the `__defaults__` BUILD file symbol.\n\nWhen parsing a BUILD (from the rule `pants.engine.internals.build_files.parse_address_family`) the\ndefaults from the closest parent BUILD file is passed as input to the parser, and the new defaults\nresulting after the BUILD file have been parsed is returned in the `AddressFamily`.\n\nThese defaults are then applied when creating the `TargetAdaptor` targets by the `Registrar` in the\nparser.\n\"\"\"\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Any, Iterable, Mapping, Tuple, Union\n\nfrom pants.engine.addresses import Address\nfrom pants.engine.target import (\n Field,\n ImmutableValue,\n InvalidFieldException,\n RegisteredTargetTypes,\n Target,\n TargetGenerator,\n)\nfrom pants.engine.unions import UnionMembership\nfrom pants.util.frozendict import FrozenDict\n\nSetDefaultsValueT = Mapping[str, Any]\nSetDefaultsKeyT = Union[str, Tuple[str, ...]]\nSetDefaultsT = Mapping[SetDefaultsKeyT, SetDefaultsValueT]\n\n\nclass BuildFileDefaults(FrozenDict[str, FrozenDict[str, ImmutableValue]]):\n \"\"\"Map target types to default field values.\"\"\"\n\n\n@dataclass\nclass BuildFileDefaultsParserState:\n address: Address\n defaults: dict[str, Mapping[str, Any]]\n registered_target_types: RegisteredTargetTypes\n union_membership: UnionMembership\n\n @classmethod\n def create(\n cls,\n path: str,\n defaults: BuildFileDefaults,\n registered_target_types: RegisteredTargetTypes,\n union_membership: UnionMembership,\n ) -> BuildFileDefaultsParserState:\n return cls(\n address=Address(path, generated_name=\"__defaults__\"),\n defaults=dict(defaults),\n registered_target_types=registered_target_types,\n union_membership=union_membership,\n )\n\n def get_frozen_defaults(self) -> BuildFileDefaults:\n types = self.registered_target_types.aliases_to_types\n return BuildFileDefaults(\n {\n target_alias: FrozenDict(\n {\n field_type.alias: field_type.compute_value(\n raw_value=default, address=self.address\n )\n for field_alias, default in fields.items()\n for field_type in self._target_type_field_types(types[target_alias])\n if field_alias in (field_type.alias, field_type.deprecated_alias)\n }\n )\n for target_alias, fields in self.defaults.items()\n }\n )\n\n def get(self, target_alias: str) -> Mapping[str, Any]:\n # Used by 
`pants.engine.internals.parser.Parser._generate_symbols.Registrar.__call__`\n return self.defaults.get(target_alias, {})\n\n def set_defaults(\n self,\n *args: SetDefaultsT,\n all: SetDefaultsValueT | None = None,\n extend: bool = False,\n **kwargs,\n ) -> None:\n defaults: dict[str, dict[str, Any]] = (\n {} if not extend else {k: dict(v) for k, v in self.defaults.items()}\n )\n\n if all is not None:\n self._process_defaults(\n defaults,\n {tuple(self.registered_target_types.aliases): all},\n ignore_unknown_fields=True,\n )\n\n for arg in args:\n self._process_defaults(defaults, arg)\n\n # Update with new defaults, dropping targets without any default values.\n for tgt, default in defaults.items():\n if not default:\n self.defaults.pop(tgt, None)\n else:\n self.defaults[tgt] = default\n\n def _target_type_field_types(self, target_type: type[Target]) -> tuple[type[Field], ...]:\n return (\n *target_type.class_field_types(self.union_membership),\n *(target_type.moved_fields if issubclass(target_type, TargetGenerator) else ()),\n )\n\n def _process_defaults(\n self,\n defaults: dict[str, dict[str, Any]],\n targets_defaults: SetDefaultsT,\n ignore_unknown_fields: bool = False,\n ):\n if not isinstance(targets_defaults, dict):\n raise ValueError(\n f\"Expected dictionary mapping targets to default field values for {self.address} \"\n f\"but got: {type(targets_defaults).__name__}.\"\n )\n\n types = self.registered_target_types.aliases_to_types\n for target, default in targets_defaults.items():\n if not isinstance(default, dict):\n raise ValueError(\n f\"Invalid default field values in {self.address} for target type {target}, \"\n f\"must be an `dict` but was {default!r} with type `{type(default).__name__}`.\"\n )\n\n targets: Iterable[str]\n targets = target if isinstance(target, tuple) else (target,)\n for target_alias in map(str, targets):\n if target_alias in types:\n target_type = types[target_alias]\n else:\n raise ValueError(f\"Unrecognized target type {target_alias} in {self.address}.\")\n\n # Copy default dict if we may mutate it.\n raw_values = dict(default) if ignore_unknown_fields else default\n\n # Validate that field exists on target\n valid_field_aliases = set(\n target_type._get_field_aliases_to_field_types(\n self._target_type_field_types(target_type)\n ).keys()\n )\n\n for field_alias in default.keys():\n if field_alias not in valid_field_aliases:\n if ignore_unknown_fields:\n del raw_values[field_alias]\n else:\n raise InvalidFieldException(\n f\"Unrecognized field `{field_alias}` for target {target_type.alias}. \"\n f\"Valid fields are: {', '.join(sorted(valid_field_aliases))}.\",\n )\n\n # TODO: support parametrization ? --needs special care due to Parametrize object not\n # being hashable, and thus not acceptable in a FrozenDict instance.\n\n # Merge all provided defaults for this call.\n defaults.setdefault(target_type.alias, {}).update(raw_values)\n", "path": "src/python/pants/engine/internals/defaults.py"}]}
2,455
860
gh_patches_debug_664
rasdani/github-patches
git_diff
fedora-infra__bodhi-507
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> setup.py test doesn't include extra_requires from fedmsg deps ``` ====================================================================== ERROR: Failure: ImportError (No module named psutil) ---------------------------------------------------------------------- Traceback (most recent call last): File "/home/decause/.virtualenvs/bodhi-python2.7/lib/python2.7/site-packages/nose/loader.py", line 418, in loadTestsFromName addr.filename, addr.module) File "/home/decause/.virtualenvs/bodhi-python2.7/lib/python2.7/site-packages/nose/importer.py", line 47, in importFromPath return self.importFromDir(dir_path, fqname) File "/home/decause/.virtualenvs/bodhi-python2.7/lib/python2.7/site-packages/nose/importer.py", line 94, in importFromDir mod = load_module(part_fqname, fh, filename, desc) File "/home/decause/code/bodhi/bodhi/tests/test_masher.py", line 27, in <module> from bodhi.consumers.masher import Masher, MasherThread File "/home/decause/code/bodhi/bodhi/consumers/masher.py", line 30, in <module> import fedmsg.consumers File "/home/decause/code/bodhi/.eggs/fedmsg-0.16.0-py2.7.egg/fedmsg/consumers/__init__.py", line 25, in <module> import psutil ImportError: No module named psutil ---------------------------------------------------------------------- Ran 335 tests in 138.787s FAILED (errors=1) ``` </issue> <code> [start of setup.py] 1 import __main__ 2 __requires__ = __main__.__requires__ = 'WebOb>=1.4.1' 3 import pkg_resources 4 5 # The following two imports are required to shut up an 6 # atexit error when running tests with python 2.7 7 import logging 8 import multiprocessing 9 10 import os 11 import sys 12 13 from setuptools import setup, find_packages 14 15 here = os.path.abspath(os.path.dirname(__file__)) 16 README = open(os.path.join(here, 'README.rst')).read() 17 CHANGES = open(os.path.join(here, 'CHANGES.txt')).read() 18 19 requires = [ 20 'pyramid', 21 'pyramid_mako', 22 'pyramid_debugtoolbar', 23 'pyramid_tm', 24 'waitress', 25 'colander', 26 'cornice', 27 28 'python-openid', 29 'pyramid_fas_openid', 30 'packagedb-cli', 31 32 'sqlalchemy', 33 'zope.sqlalchemy', 34 35 'webhelpers', 36 'progressbar', 37 38 'bunch', 39 40 # for captchas 41 'cryptography', 42 'Pillow', 43 44 # Useful tools 45 'kitchen', 46 'python-fedora', 47 'pylibravatar', 48 'pyDNS', 49 'dogpile.cache', 50 'arrow', 51 'markdown', 52 53 # i18n, that we're not actually doing yet. 
54 #'Babel', 55 #'lingua', 56 57 # External resources 58 'python-bugzilla', 59 'simplemediawiki', 60 'fedmsg', 61 62 'Sphinx', 63 64 # For the bodhi-client 65 'click', 66 67 'WebOb>=1.4.1', 68 ] 69 70 if sys.version_info[:3] < (2,7,0): 71 requires.append('importlib') 72 73 if sys.version_info[:3] < (2,5,0): 74 requires.append('pysqlite') 75 76 setup(name='bodhi', 77 version='2.0', 78 description='bodhi', 79 long_description=README + '\n\n' + CHANGES, 80 classifiers=[ 81 "Programming Language :: Python", 82 "Framework :: Pyramid", 83 "Topic :: Internet :: WWW/HTTP", 84 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", 85 ], 86 author='', 87 author_email='', 88 url='', 89 keywords='web fedora pyramid', 90 packages=find_packages(), 91 include_package_data=True, 92 zip_safe=False, 93 install_requires = requires, 94 tests_require = [ 95 'nose', 96 'nose-cov', 97 'webtest', 98 'mock' 99 ], 100 test_suite="nose.collector", 101 message_extractors = { '.': [ 102 #('**.py', 'lingua_python', None), 103 #('**.mak', 'lingua_xml', None), 104 ]}, 105 entry_points = """\ 106 [paste.app_factory] 107 main = bodhi:main 108 [console_scripts] 109 initialize_bodhi_db = bodhi.scripts.initializedb:main 110 bodhi = bodhi.cli:cli 111 bodhi-push = bodhi.push:push 112 bodhi-expire-overrides = bodhi.scripts.expire_overrides:main 113 [moksha.consumer] 114 masher = bodhi.consumers.masher:Masher 115 updates = bodhi.consumers.updates:UpdatesHandler 116 """, 117 paster_plugins=['pyramid'], 118 ) 119 120 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -57,7 +57,9 @@ # External resources 'python-bugzilla', 'simplemediawiki', - 'fedmsg', + + # "python setup.py test" needs one of fedmsg's setup.py extra_requires + 'fedmsg[consumers]', 'Sphinx',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -57,7 +57,9 @@\n # External resources\n 'python-bugzilla',\n 'simplemediawiki',\n- 'fedmsg',\n+\n+ # \"python setup.py test\" needs one of fedmsg's setup.py extra_requires\n+ 'fedmsg[consumers]',\n \n 'Sphinx',\n", "issue": "setup.py test doesn't include extra_requires from fedmsg deps\n```\n======================================================================\nERROR: Failure: ImportError (No module named psutil)\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"/home/decause/.virtualenvs/bodhi-python2.7/lib/python2.7/site-packages/nose/loader.py\", line 418, in loadTestsFromName\n addr.filename, addr.module)\n File \"/home/decause/.virtualenvs/bodhi-python2.7/lib/python2.7/site-packages/nose/importer.py\", line 47, in importFromPath\n return self.importFromDir(dir_path, fqname)\n File \"/home/decause/.virtualenvs/bodhi-python2.7/lib/python2.7/site-packages/nose/importer.py\", line 94, in importFromDir\n mod = load_module(part_fqname, fh, filename, desc)\n File \"/home/decause/code/bodhi/bodhi/tests/test_masher.py\", line 27, in <module>\n from bodhi.consumers.masher import Masher, MasherThread\n File \"/home/decause/code/bodhi/bodhi/consumers/masher.py\", line 30, in <module>\n import fedmsg.consumers\n File \"/home/decause/code/bodhi/.eggs/fedmsg-0.16.0-py2.7.egg/fedmsg/consumers/__init__.py\", line 25, in <module>\n import psutil\nImportError: No module named psutil\n\n----------------------------------------------------------------------\nRan 335 tests in 138.787s\n\nFAILED (errors=1)\n```\n\n", "before_files": [{"content": "import __main__\n__requires__ = __main__.__requires__ = 'WebOb>=1.4.1'\nimport pkg_resources\n\n# The following two imports are required to shut up an\n# atexit error when running tests with python 2.7\nimport logging\nimport multiprocessing\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.rst')).read()\nCHANGES = open(os.path.join(here, 'CHANGES.txt')).read()\n\nrequires = [\n 'pyramid',\n 'pyramid_mako',\n 'pyramid_debugtoolbar',\n 'pyramid_tm',\n 'waitress',\n 'colander',\n 'cornice',\n\n 'python-openid',\n 'pyramid_fas_openid',\n 'packagedb-cli',\n\n 'sqlalchemy',\n 'zope.sqlalchemy',\n\n 'webhelpers',\n 'progressbar',\n\n 'bunch',\n\n # for captchas\n 'cryptography',\n 'Pillow',\n\n # Useful tools\n 'kitchen',\n 'python-fedora',\n 'pylibravatar',\n 'pyDNS',\n 'dogpile.cache',\n 'arrow',\n 'markdown',\n\n # i18n, that we're not actually doing yet.\n #'Babel',\n #'lingua',\n\n # External resources\n 'python-bugzilla',\n 'simplemediawiki',\n 'fedmsg',\n\n 'Sphinx',\n\n # For the bodhi-client\n 'click',\n\n 'WebOb>=1.4.1',\n ]\n\nif sys.version_info[:3] < (2,7,0):\n requires.append('importlib')\n\nif sys.version_info[:3] < (2,5,0):\n requires.append('pysqlite')\n\nsetup(name='bodhi',\n version='2.0',\n description='bodhi',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Programming Language :: Python\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n ],\n author='',\n author_email='',\n url='',\n keywords='web fedora pyramid',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires = requires,\n tests_require = [\n 'nose',\n 'nose-cov',\n 'webtest',\n 'mock'\n 
],\n test_suite=\"nose.collector\",\n message_extractors = { '.': [\n #('**.py', 'lingua_python', None),\n #('**.mak', 'lingua_xml', None),\n ]},\n entry_points = \"\"\"\\\n [paste.app_factory]\n main = bodhi:main\n [console_scripts]\n initialize_bodhi_db = bodhi.scripts.initializedb:main\n bodhi = bodhi.cli:cli\n bodhi-push = bodhi.push:push\n bodhi-expire-overrides = bodhi.scripts.expire_overrides:main\n [moksha.consumer]\n masher = bodhi.consumers.masher:Masher\n updates = bodhi.consumers.updates:UpdatesHandler\n \"\"\",\n paster_plugins=['pyramid'],\n )\n\n", "path": "setup.py"}]}
1,886
93
gh_patches_debug_25493
rasdani/github-patches
git_diff
liqd__adhocracy4-211
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Subject with new line crashes email sending Subject with new line crashes email sending </issue> <code> [start of adhocracy4/emails/mixins.py] 1 from email.mime.image import MIMEImage 2 3 from django.contrib.staticfiles import finders 4 from .base import EmailBase 5 6 7 class PlatformEmailMixin: 8 """ 9 Attaches the static file images/logo.png so it can be used in an html 10 email. 11 """ 12 def get_attachments(self): 13 attachments = super().get_attachments() 14 filename = ( 15 finders.find('images/email_logo.png') 16 or finders.find('images/email_logo.svg') 17 ) 18 if filename: 19 if filename.endswith('.png'): 20 imagetype = 'png' 21 else: 22 imagetype = 'svg+xml' 23 24 with open(filename, 'rb') as f: 25 logo = MIMEImage(f.read(), imagetype) 26 27 logo.add_header('Content-ID', '<{}>'.format('logo')) 28 return attachments + [logo] 29 return attachments 30 31 32 class SyncEmailMixin(EmailBase): 33 """Send Emails synchronously.""" 34 35 @classmethod 36 def send(cls, object, *args, **kwargs): 37 """Call dispatch immediately""" 38 return cls().dispatch(object, *args, **kwargs) 39 [end of adhocracy4/emails/mixins.py] [start of adhocracy4/emails/base.py] 1 from django.conf import settings 2 from django.contrib.contenttypes.models import ContentType 3 from django.contrib.sites import models as site_models 4 from django.core.mail.message import EmailMultiAlternatives 5 from django.template.loader import select_template 6 from django.utils import translation 7 8 from . import tasks 9 10 11 class EmailBase: 12 site_id = 1 13 object = None 14 template_name = None 15 fallback_language = 'en' 16 for_moderator = False 17 18 def get_site(self): 19 return site_models.Site.objects.get(pk=self.site_id) 20 21 def get_host(self): 22 site = self.get_site() 23 ssl_enabled = True 24 if site.domain.startswith('localhost:'): 25 ssl_enabled = False 26 27 url = 'http{ssl_flag}://{domain}'.format( 28 ssl_flag='s' if ssl_enabled else '', 29 domain=site.domain, 30 ) 31 return url 32 33 def get_context(self): 34 object_context_key = self.object.__class__.__name__.lower() 35 return { 36 'email': self, 37 'site': self.get_site(), 38 object_context_key: self.object 39 } 40 41 def get_receivers(self): 42 return [] 43 44 def get_attachments(self): 45 return [] 46 47 def get_languages(self, receiver): 48 return [translation.get_language(), self.fallback_language] 49 50 def get_reply_to(self): 51 return None 52 53 @classmethod 54 def send(cls, object, *args, **kwargs): 55 """Send email asynchronously. 56 57 NOTE: args and kwargs must be JSON serializable. 
58 """ 59 ct = ContentType.objects.get_for_model(object) 60 tasks.send_async( 61 cls.__module__, cls.__name__, 62 ct.app_label, ct.model, object.pk, 63 args, kwargs) 64 return [] 65 66 def render(self, template_name, context): 67 languages = self.get_languages(context['receiver']) 68 template = select_template([ 69 '{}.{}.email'.format(template_name, lang) 70 for lang in languages 71 ]) 72 73 # Get the actually chosen language from the template name 74 language = template.template.name.split('.', 2)[-2] 75 76 with translation.override(language): 77 parts = [] 78 for part_type in ('subject', 'txt', 'html'): 79 context['part_type'] = part_type 80 parts.append(template.render(context)) 81 context.pop('part_type') 82 83 return tuple(parts) 84 85 def dispatch(self, object, *args, **kwargs): 86 self.object = object 87 self.kwargs = kwargs 88 receivers = self.get_receivers() 89 context = self.get_context() 90 context.update(kwargs) 91 attachments = self.get_attachments() 92 template = self.template_name 93 94 mails = [] 95 for receiver in receivers: 96 context['receiver'] = receiver 97 (subject, text, html) = self.render(template, context) 98 context.pop('receiver') 99 100 if hasattr(receiver, 'email'): 101 to_address = receiver.email 102 else: 103 to_address = receiver 104 105 mail = EmailMultiAlternatives( 106 subject=subject.strip(), 107 body=text, 108 from_email=settings.DEFAULT_FROM_EMAIL, 109 to=[to_address], 110 reply_to=self.get_reply_to(), 111 ) 112 113 if len(attachments) > 0: 114 mail.mixed_subtype = 'related' 115 116 for attachment in attachments: 117 mail.attach(attachment) 118 119 mail.attach_alternative(html, 'text/html') 120 mail.send() 121 mails.append(mail) 122 return mails 123 [end of adhocracy4/emails/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/adhocracy4/emails/base.py b/adhocracy4/emails/base.py --- a/adhocracy4/emails/base.py +++ b/adhocracy4/emails/base.py @@ -1,3 +1,5 @@ +import re + from django.conf import settings from django.contrib.contenttypes.models import ContentType from django.contrib.sites import models as site_models @@ -102,8 +104,10 @@ else: to_address = receiver + subject_clean = re.sub(r'[\r\n]', '', subject).strip() + mail = EmailMultiAlternatives( - subject=subject.strip(), + subject=subject_clean, body=text, from_email=settings.DEFAULT_FROM_EMAIL, to=[to_address], diff --git a/adhocracy4/emails/mixins.py b/adhocracy4/emails/mixins.py --- a/adhocracy4/emails/mixins.py +++ b/adhocracy4/emails/mixins.py @@ -1,7 +1,6 @@ from email.mime.image import MIMEImage from django.contrib.staticfiles import finders -from .base import EmailBase class PlatformEmailMixin: @@ -29,7 +28,7 @@ return attachments -class SyncEmailMixin(EmailBase): +class SyncEmailMixin: """Send Emails synchronously.""" @classmethod
{"golden_diff": "diff --git a/adhocracy4/emails/base.py b/adhocracy4/emails/base.py\n--- a/adhocracy4/emails/base.py\n+++ b/adhocracy4/emails/base.py\n@@ -1,3 +1,5 @@\n+import re\n+\n from django.conf import settings\n from django.contrib.contenttypes.models import ContentType\n from django.contrib.sites import models as site_models\n@@ -102,8 +104,10 @@\n else:\n to_address = receiver\n \n+ subject_clean = re.sub(r'[\\r\\n]', '', subject).strip()\n+\n mail = EmailMultiAlternatives(\n- subject=subject.strip(),\n+ subject=subject_clean,\n body=text,\n from_email=settings.DEFAULT_FROM_EMAIL,\n to=[to_address],\ndiff --git a/adhocracy4/emails/mixins.py b/adhocracy4/emails/mixins.py\n--- a/adhocracy4/emails/mixins.py\n+++ b/adhocracy4/emails/mixins.py\n@@ -1,7 +1,6 @@\n from email.mime.image import MIMEImage\n \n from django.contrib.staticfiles import finders\n-from .base import EmailBase\n \n \n class PlatformEmailMixin:\n@@ -29,7 +28,7 @@\n return attachments\n \n \n-class SyncEmailMixin(EmailBase):\n+class SyncEmailMixin:\n \"\"\"Send Emails synchronously.\"\"\"\n \n @classmethod\n", "issue": "Subject with new line crashes email sending\n\nSubject with new line crashes email sending\n\n", "before_files": [{"content": "from email.mime.image import MIMEImage\n\nfrom django.contrib.staticfiles import finders\nfrom .base import EmailBase\n\n\nclass PlatformEmailMixin:\n \"\"\"\n Attaches the static file images/logo.png so it can be used in an html\n email.\n \"\"\"\n def get_attachments(self):\n attachments = super().get_attachments()\n filename = (\n finders.find('images/email_logo.png')\n or finders.find('images/email_logo.svg')\n )\n if filename:\n if filename.endswith('.png'):\n imagetype = 'png'\n else:\n imagetype = 'svg+xml'\n\n with open(filename, 'rb') as f:\n logo = MIMEImage(f.read(), imagetype)\n\n logo.add_header('Content-ID', '<{}>'.format('logo'))\n return attachments + [logo]\n return attachments\n\n\nclass SyncEmailMixin(EmailBase):\n \"\"\"Send Emails synchronously.\"\"\"\n\n @classmethod\n def send(cls, object, *args, **kwargs):\n \"\"\"Call dispatch immediately\"\"\"\n return cls().dispatch(object, *args, **kwargs)\n", "path": "adhocracy4/emails/mixins.py"}, {"content": "from django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.sites import models as site_models\nfrom django.core.mail.message import EmailMultiAlternatives\nfrom django.template.loader import select_template\nfrom django.utils import translation\n\nfrom . 
import tasks\n\n\nclass EmailBase:\n site_id = 1\n object = None\n template_name = None\n fallback_language = 'en'\n for_moderator = False\n\n def get_site(self):\n return site_models.Site.objects.get(pk=self.site_id)\n\n def get_host(self):\n site = self.get_site()\n ssl_enabled = True\n if site.domain.startswith('localhost:'):\n ssl_enabled = False\n\n url = 'http{ssl_flag}://{domain}'.format(\n ssl_flag='s' if ssl_enabled else '',\n domain=site.domain,\n )\n return url\n\n def get_context(self):\n object_context_key = self.object.__class__.__name__.lower()\n return {\n 'email': self,\n 'site': self.get_site(),\n object_context_key: self.object\n }\n\n def get_receivers(self):\n return []\n\n def get_attachments(self):\n return []\n\n def get_languages(self, receiver):\n return [translation.get_language(), self.fallback_language]\n\n def get_reply_to(self):\n return None\n\n @classmethod\n def send(cls, object, *args, **kwargs):\n \"\"\"Send email asynchronously.\n\n NOTE: args and kwargs must be JSON serializable.\n \"\"\"\n ct = ContentType.objects.get_for_model(object)\n tasks.send_async(\n cls.__module__, cls.__name__,\n ct.app_label, ct.model, object.pk,\n args, kwargs)\n return []\n\n def render(self, template_name, context):\n languages = self.get_languages(context['receiver'])\n template = select_template([\n '{}.{}.email'.format(template_name, lang)\n for lang in languages\n ])\n\n # Get the actually chosen language from the template name\n language = template.template.name.split('.', 2)[-2]\n\n with translation.override(language):\n parts = []\n for part_type in ('subject', 'txt', 'html'):\n context['part_type'] = part_type\n parts.append(template.render(context))\n context.pop('part_type')\n\n return tuple(parts)\n\n def dispatch(self, object, *args, **kwargs):\n self.object = object\n self.kwargs = kwargs\n receivers = self.get_receivers()\n context = self.get_context()\n context.update(kwargs)\n attachments = self.get_attachments()\n template = self.template_name\n\n mails = []\n for receiver in receivers:\n context['receiver'] = receiver\n (subject, text, html) = self.render(template, context)\n context.pop('receiver')\n\n if hasattr(receiver, 'email'):\n to_address = receiver.email\n else:\n to_address = receiver\n\n mail = EmailMultiAlternatives(\n subject=subject.strip(),\n body=text,\n from_email=settings.DEFAULT_FROM_EMAIL,\n to=[to_address],\n reply_to=self.get_reply_to(),\n )\n\n if len(attachments) > 0:\n mail.mixed_subtype = 'related'\n\n for attachment in attachments:\n mail.attach(attachment)\n\n mail.attach_alternative(html, 'text/html')\n mail.send()\n mails.append(mail)\n return mails\n", "path": "adhocracy4/emails/base.py"}]}
1,873
303
gh_patches_debug_28410
rasdani/github-patches
git_diff
mne-tools__mne-python-9092
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> split code block in examples/preprocessing/plot_virtual_evoked right now, because all plots come from a single code block, they are plotted at the top of the example in a group of 4 (and consequently the plots are really small). By splitting the 4 plotting calls into different code blocks, they will plot larger / be easier to see & compare, without increasing run time of the example. Code blocks can be split with a line of 79 `#` marks (adding a bit of explanatory text too is usually a good idea) </issue> <code> [start of examples/preprocessing/plot_virtual_evoked.py] 1 """ 2 ======================= 3 Remap MEG channel types 4 ======================= 5 6 In this example, MEG data are remapped from one channel type to another. 7 This is useful to: 8 9 - visualize combined magnetometers and gradiometers as magnetometers 10 or gradiometers. 11 - run statistics from both magnetometers and gradiometers while 12 working with a single type of channels. 13 """ 14 15 # Author: Mainak Jas <mainak.jas@telecom-paristech.fr> 16 17 # License: BSD (3-clause) 18 19 import mne 20 from mne.datasets import sample 21 22 print(__doc__) 23 24 # read the evoked 25 data_path = sample.data_path() 26 fname = data_path + '/MEG/sample/sample_audvis-ave.fif' 27 evoked = mne.read_evokeds(fname, condition='Left Auditory', baseline=(None, 0)) 28 29 # go from grad + mag to mag 30 virt_evoked = evoked.as_type('mag') 31 evoked.plot_topomap(ch_type='mag', title='mag (original)', time_unit='s') 32 virt_evoked.plot_topomap(ch_type='mag', time_unit='s', 33 title='mag (interpolated from mag + grad)') 34 35 # go from grad + mag to grad 36 virt_evoked = evoked.as_type('grad') 37 evoked.plot_topomap(ch_type='grad', title='grad (original)', time_unit='s') 38 virt_evoked.plot_topomap(ch_type='grad', time_unit='s', 39 title='grad (interpolated from mag + grad)') 40 [end of examples/preprocessing/plot_virtual_evoked.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/preprocessing/plot_virtual_evoked.py b/examples/preprocessing/plot_virtual_evoked.py --- a/examples/preprocessing/plot_virtual_evoked.py +++ b/examples/preprocessing/plot_virtual_evoked.py @@ -26,14 +26,30 @@ fname = data_path + '/MEG/sample/sample_audvis-ave.fif' evoked = mne.read_evokeds(fname, condition='Left Auditory', baseline=(None, 0)) -# go from grad + mag to mag +############################################################################### +# First, let's call remap gradiometers to magnometers, and plot +# the original and remapped topomaps of the magnetometers. + +# go from grad + mag to mag and plot original mag virt_evoked = evoked.as_type('mag') evoked.plot_topomap(ch_type='mag', title='mag (original)', time_unit='s') + +############################################################################### + +# plot interpolated grad + mag virt_evoked.plot_topomap(ch_type='mag', time_unit='s', title='mag (interpolated from mag + grad)') -# go from grad + mag to grad +############################################################################### +# Now, we remap magnometers to gradiometers, and plot +# the original and remapped topomaps of the gradiometers + +# go from grad + mag to grad and plot original grad virt_evoked = evoked.as_type('grad') evoked.plot_topomap(ch_type='grad', title='grad (original)', time_unit='s') + +############################################################################### + +# plot interpolated grad + mag virt_evoked.plot_topomap(ch_type='grad', time_unit='s', title='grad (interpolated from mag + grad)')
{"golden_diff": "diff --git a/examples/preprocessing/plot_virtual_evoked.py b/examples/preprocessing/plot_virtual_evoked.py\n--- a/examples/preprocessing/plot_virtual_evoked.py\n+++ b/examples/preprocessing/plot_virtual_evoked.py\n@@ -26,14 +26,30 @@\n fname = data_path + '/MEG/sample/sample_audvis-ave.fif'\n evoked = mne.read_evokeds(fname, condition='Left Auditory', baseline=(None, 0))\n \n-# go from grad + mag to mag\n+###############################################################################\n+# First, let's call remap gradiometers to magnometers, and plot\n+# the original and remapped topomaps of the magnetometers.\n+\n+# go from grad + mag to mag and plot original mag\n virt_evoked = evoked.as_type('mag')\n evoked.plot_topomap(ch_type='mag', title='mag (original)', time_unit='s')\n+\n+###############################################################################\n+\n+# plot interpolated grad + mag\n virt_evoked.plot_topomap(ch_type='mag', time_unit='s',\n title='mag (interpolated from mag + grad)')\n \n-# go from grad + mag to grad\n+###############################################################################\n+# Now, we remap magnometers to gradiometers, and plot\n+# the original and remapped topomaps of the gradiometers\n+\n+# go from grad + mag to grad and plot original grad\n virt_evoked = evoked.as_type('grad')\n evoked.plot_topomap(ch_type='grad', title='grad (original)', time_unit='s')\n+\n+###############################################################################\n+\n+# plot interpolated grad + mag\n virt_evoked.plot_topomap(ch_type='grad', time_unit='s',\n title='grad (interpolated from mag + grad)')\n", "issue": "split code block in examples/preprocessing/plot_virtual_evoked\nright now, because all plots come from a single code block, they are plotted at the top of the example in a group of 4 (and consequently the plots are really small). By splitting the 4 plotting calls into different code blocks, they will plot larger / be easier to see & compare, without increasing run time of the example. Code blocks can be split with a line of 79 `#` marks (adding a bit of explanatory text too is usually a good idea)\n", "before_files": [{"content": "\"\"\"\n=======================\nRemap MEG channel types\n=======================\n\nIn this example, MEG data are remapped from one channel type to another.\nThis is useful to:\n\n - visualize combined magnetometers and gradiometers as magnetometers\n or gradiometers.\n - run statistics from both magnetometers and gradiometers while\n working with a single type of channels.\n\"\"\"\n\n# Author: Mainak Jas <mainak.jas@telecom-paristech.fr>\n\n# License: BSD (3-clause)\n\nimport mne\nfrom mne.datasets import sample\n\nprint(__doc__)\n\n# read the evoked\ndata_path = sample.data_path()\nfname = data_path + '/MEG/sample/sample_audvis-ave.fif'\nevoked = mne.read_evokeds(fname, condition='Left Auditory', baseline=(None, 0))\n\n# go from grad + mag to mag\nvirt_evoked = evoked.as_type('mag')\nevoked.plot_topomap(ch_type='mag', title='mag (original)', time_unit='s')\nvirt_evoked.plot_topomap(ch_type='mag', time_unit='s',\n title='mag (interpolated from mag + grad)')\n\n# go from grad + mag to grad\nvirt_evoked = evoked.as_type('grad')\nevoked.plot_topomap(ch_type='grad', title='grad (original)', time_unit='s')\nvirt_evoked.plot_topomap(ch_type='grad', time_unit='s',\n title='grad (interpolated from mag + grad)')\n", "path": "examples/preprocessing/plot_virtual_evoked.py"}]}
1,053
365
gh_patches_debug_57312
rasdani/github-patches
git_diff
vllm-project__vllm-402
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Input and output orders are different I was trying to do batch inference by feeding a list of texts into `llm.generate()`. But I found that the outputs have different orders from examples in the input list. For example, the result for the first example in the input list may be the second in the output list. Anything suggestions that I may have done something wrong? Thanks! </issue> <code> [start of vllm/entrypoints/llm.py] 1 from typing import List, Optional, Union 2 3 from tqdm import tqdm 4 from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast 5 6 from vllm.engine.arg_utils import EngineArgs 7 from vllm.engine.llm_engine import LLMEngine 8 from vllm.outputs import RequestOutput 9 from vllm.sampling_params import SamplingParams 10 from vllm.utils import Counter 11 12 13 class LLM: 14 """An LLM for generating texts from given prompts and sampling parameters. 15 16 This class includes a tokenizer, a language model (possibly distributed 17 across multiple GPUs), and GPU memory space allocated for intermediate 18 states (aka KV cache). Given a batch of prompts and sampling parameters, 19 this class generates texts from the model, using an intelligent batching 20 mechanism and efficient memory management. 21 22 NOTE: This class is intended to be used for offline inference. For online 23 serving, use the `AsyncLLMEngine` class instead. 24 NOTE: For the comprehensive list of arguments, see `EngineArgs`. 25 26 Args: 27 model: The name or path of a HuggingFace Transformers model. 28 tokenizer: The name or path of a HuggingFace Transformers tokenizer. 29 tokenizer_mode: The tokenizer mode. "auto" will use the fast tokenizer 30 if available, and "slow" will always use the slow tokenizer. 31 trust_remote_code: Trust remote code (e.g., from HuggingFace) when 32 downloading the model and tokenizer. 33 tensor_parallel_size: The number of GPUs to use for distributed 34 execution with tensor parallelism. 35 dtype: The data type for the model weights and activations. Currently, 36 we support `float32`, `float16`, and `bfloat16`. If `auto`, we use 37 the `torch_dtype` attribute specified in the model config file. 38 However, if the `torch_dtype` in the config is `float32`, we will 39 use `float16` instead. 40 seed: The seed to initialize the random number generator for sampling. 
41 """ 42 43 def __init__( 44 self, 45 model: str, 46 tokenizer: Optional[str] = None, 47 tokenizer_mode: str = "auto", 48 trust_remote_code: bool = False, 49 tensor_parallel_size: int = 1, 50 dtype: str = "auto", 51 seed: int = 0, 52 **kwargs, 53 ) -> None: 54 if "disable_log_stats" not in kwargs: 55 kwargs["disable_log_stats"] = True 56 engine_args = EngineArgs( 57 model=model, 58 tokenizer=tokenizer, 59 tokenizer_mode=tokenizer_mode, 60 trust_remote_code=trust_remote_code, 61 tensor_parallel_size=tensor_parallel_size, 62 dtype=dtype, 63 seed=seed, 64 **kwargs, 65 ) 66 self.llm_engine = LLMEngine.from_engine_args(engine_args) 67 self.request_counter = Counter() 68 69 def get_tokenizer( 70 self) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]: 71 return self.llm_engine.tokenizer 72 73 def set_tokenizer( 74 self, 75 tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], 76 ) -> None: 77 self.llm_engine.tokenizer = tokenizer 78 79 def generate( 80 self, 81 prompts: Optional[Union[str, List[str]]] = None, 82 sampling_params: Optional[SamplingParams] = None, 83 prompt_token_ids: Optional[List[List[int]]] = None, 84 use_tqdm: bool = True, 85 ) -> List[RequestOutput]: 86 """Generates the completions for the input prompts. 87 88 NOTE: This class automatically batches the given prompts, considering 89 the memory constraint. For the best performance, put all of your prompts 90 into a single list and pass it to this method. 91 92 Args: 93 prompts: A list of prompts to generate completions for. 94 sampling_params: The sampling parameters for text generation. If 95 None, we use the default sampling parameters. 96 prompt_token_ids: A list of token IDs for the prompts. If None, we 97 use the tokenizer to convert the prompts to token IDs. 98 use_tqdm: Whether to use tqdm to display the progress bar. 99 100 Returns: 101 A list of `RequestOutput` objects containing the generated 102 completions in the same order as the input prompts. 103 """ 104 if prompts is None and prompt_token_ids is None: 105 raise ValueError("Either prompts or prompt_token_ids must be " 106 "provided.") 107 if isinstance(prompts, str): 108 # Convert a single prompt to a list. 109 prompts = [prompts] 110 if prompts is not None and prompt_token_ids is not None: 111 if len(prompts) != len(prompt_token_ids): 112 raise ValueError("The lengths of prompts and prompt_token_ids " 113 "must be the same.") 114 if sampling_params is None: 115 # Use default sampling params. 116 sampling_params = SamplingParams() 117 118 # Add requests to the engine. 119 if prompts is not None: 120 num_requests = len(prompts) 121 else: 122 num_requests = len(prompt_token_ids) 123 for i in range(num_requests): 124 prompt = prompts[i] if prompts is not None else None 125 if prompt_token_ids is None: 126 token_ids = None 127 else: 128 token_ids = prompt_token_ids[i] 129 self._add_request(prompt, sampling_params, token_ids) 130 return self._run_engine(use_tqdm) 131 132 def _add_request( 133 self, 134 prompt: Optional[str], 135 sampling_params: SamplingParams, 136 prompt_token_ids: Optional[List[int]], 137 ) -> None: 138 request_id = str(next(self.request_counter)) 139 self.llm_engine.add_request(request_id, prompt, sampling_params, 140 prompt_token_ids) 141 142 def _run_engine(self, use_tqdm: bool) -> List[RequestOutput]: 143 # Initialize tqdm. 144 if use_tqdm: 145 num_requests = self.llm_engine.get_num_unfinished_requests() 146 pbar = tqdm(total=num_requests, desc="Processed prompts") 147 # Run the engine. 
148 outputs: List[RequestOutput] = [] 149 while self.llm_engine.has_unfinished_requests(): 150 step_outputs = self.llm_engine.step() 151 for output in step_outputs: 152 if output.finished: 153 outputs.append(output) 154 if use_tqdm: 155 pbar.update(1) 156 if use_tqdm: 157 pbar.close() 158 return outputs 159 [end of vllm/entrypoints/llm.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -155,4 +155,8 @@ pbar.update(1) if use_tqdm: pbar.close() + # Sort the outputs by request ID. + # This is necessary because some requests may be finished earlier than + # its previous requests. + outputs = sorted(outputs, key=lambda x: int(x.request_id)) return outputs
{"golden_diff": "diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py\n--- a/vllm/entrypoints/llm.py\n+++ b/vllm/entrypoints/llm.py\n@@ -155,4 +155,8 @@\n pbar.update(1)\n if use_tqdm:\n pbar.close()\n+ # Sort the outputs by request ID.\n+ # This is necessary because some requests may be finished earlier than\n+ # its previous requests.\n+ outputs = sorted(outputs, key=lambda x: int(x.request_id))\n return outputs\n", "issue": "Input and output orders are different\nI was trying to do batch inference by feeding a list of texts into `llm.generate()`. But I found that the outputs have different orders from examples in the input list. For example, the result for the first example in the input list may be the second in the output list. Anything suggestions that I may have done something wrong? Thanks!\n", "before_files": [{"content": "from typing import List, Optional, Union\n\nfrom tqdm import tqdm\nfrom transformers import PreTrainedTokenizer, PreTrainedTokenizerFast\n\nfrom vllm.engine.arg_utils import EngineArgs\nfrom vllm.engine.llm_engine import LLMEngine\nfrom vllm.outputs import RequestOutput\nfrom vllm.sampling_params import SamplingParams\nfrom vllm.utils import Counter\n\n\nclass LLM:\n \"\"\"An LLM for generating texts from given prompts and sampling parameters.\n\n This class includes a tokenizer, a language model (possibly distributed\n across multiple GPUs), and GPU memory space allocated for intermediate\n states (aka KV cache). Given a batch of prompts and sampling parameters,\n this class generates texts from the model, using an intelligent batching\n mechanism and efficient memory management.\n\n NOTE: This class is intended to be used for offline inference. For online\n serving, use the `AsyncLLMEngine` class instead.\n NOTE: For the comprehensive list of arguments, see `EngineArgs`.\n\n Args:\n model: The name or path of a HuggingFace Transformers model.\n tokenizer: The name or path of a HuggingFace Transformers tokenizer.\n tokenizer_mode: The tokenizer mode. \"auto\" will use the fast tokenizer\n if available, and \"slow\" will always use the slow tokenizer.\n trust_remote_code: Trust remote code (e.g., from HuggingFace) when\n downloading the model and tokenizer.\n tensor_parallel_size: The number of GPUs to use for distributed\n execution with tensor parallelism.\n dtype: The data type for the model weights and activations. Currently,\n we support `float32`, `float16`, and `bfloat16`. 
If `auto`, we use\n the `torch_dtype` attribute specified in the model config file.\n However, if the `torch_dtype` in the config is `float32`, we will\n use `float16` instead.\n seed: The seed to initialize the random number generator for sampling.\n \"\"\"\n\n def __init__(\n self,\n model: str,\n tokenizer: Optional[str] = None,\n tokenizer_mode: str = \"auto\",\n trust_remote_code: bool = False,\n tensor_parallel_size: int = 1,\n dtype: str = \"auto\",\n seed: int = 0,\n **kwargs,\n ) -> None:\n if \"disable_log_stats\" not in kwargs:\n kwargs[\"disable_log_stats\"] = True\n engine_args = EngineArgs(\n model=model,\n tokenizer=tokenizer,\n tokenizer_mode=tokenizer_mode,\n trust_remote_code=trust_remote_code,\n tensor_parallel_size=tensor_parallel_size,\n dtype=dtype,\n seed=seed,\n **kwargs,\n )\n self.llm_engine = LLMEngine.from_engine_args(engine_args)\n self.request_counter = Counter()\n\n def get_tokenizer(\n self) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]:\n return self.llm_engine.tokenizer\n\n def set_tokenizer(\n self,\n tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],\n ) -> None:\n self.llm_engine.tokenizer = tokenizer\n\n def generate(\n self,\n prompts: Optional[Union[str, List[str]]] = None,\n sampling_params: Optional[SamplingParams] = None,\n prompt_token_ids: Optional[List[List[int]]] = None,\n use_tqdm: bool = True,\n ) -> List[RequestOutput]:\n \"\"\"Generates the completions for the input prompts.\n\n NOTE: This class automatically batches the given prompts, considering\n the memory constraint. For the best performance, put all of your prompts\n into a single list and pass it to this method.\n\n Args:\n prompts: A list of prompts to generate completions for.\n sampling_params: The sampling parameters for text generation. If\n None, we use the default sampling parameters.\n prompt_token_ids: A list of token IDs for the prompts. 
If None, we\n use the tokenizer to convert the prompts to token IDs.\n use_tqdm: Whether to use tqdm to display the progress bar.\n\n Returns:\n A list of `RequestOutput` objects containing the generated\n completions in the same order as the input prompts.\n \"\"\"\n if prompts is None and prompt_token_ids is None:\n raise ValueError(\"Either prompts or prompt_token_ids must be \"\n \"provided.\")\n if isinstance(prompts, str):\n # Convert a single prompt to a list.\n prompts = [prompts]\n if prompts is not None and prompt_token_ids is not None:\n if len(prompts) != len(prompt_token_ids):\n raise ValueError(\"The lengths of prompts and prompt_token_ids \"\n \"must be the same.\")\n if sampling_params is None:\n # Use default sampling params.\n sampling_params = SamplingParams()\n\n # Add requests to the engine.\n if prompts is not None:\n num_requests = len(prompts)\n else:\n num_requests = len(prompt_token_ids)\n for i in range(num_requests):\n prompt = prompts[i] if prompts is not None else None\n if prompt_token_ids is None:\n token_ids = None\n else:\n token_ids = prompt_token_ids[i]\n self._add_request(prompt, sampling_params, token_ids)\n return self._run_engine(use_tqdm)\n\n def _add_request(\n self,\n prompt: Optional[str],\n sampling_params: SamplingParams,\n prompt_token_ids: Optional[List[int]],\n ) -> None:\n request_id = str(next(self.request_counter))\n self.llm_engine.add_request(request_id, prompt, sampling_params,\n prompt_token_ids)\n\n def _run_engine(self, use_tqdm: bool) -> List[RequestOutput]:\n # Initialize tqdm.\n if use_tqdm:\n num_requests = self.llm_engine.get_num_unfinished_requests()\n pbar = tqdm(total=num_requests, desc=\"Processed prompts\")\n # Run the engine.\n outputs: List[RequestOutput] = []\n while self.llm_engine.has_unfinished_requests():\n step_outputs = self.llm_engine.step()\n for output in step_outputs:\n if output.finished:\n outputs.append(output)\n if use_tqdm:\n pbar.update(1)\n if use_tqdm:\n pbar.close()\n return outputs\n", "path": "vllm/entrypoints/llm.py"}]}
2,351
137
gh_patches_debug_157
rasdani/github-patches
git_diff
doccano__doccano-1907
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cannot access Django admin panel in a Heroku deployment How to reproduce the behaviour --------- The FAQ describes how to [create a user via the Django admin panel](https://github.com/doccano/doccano/blob/master/docs/faq.md#how-to-create-a-user) for a locally hosted Doccano. When run locally, I have no problem to reach the admin panel on `http://localhost:8000/admin/`, in Heroku however it is not working. I have tried to reach it on - `https://mydeployment.herokuapp.com/admin/` - `https://mydeployment.herokuapp.com/admin/login` - `https://mydeployment.herokuapp.com/admin/login/` - `http://mydeployment.herokuapp.com/admin/` Those urls all result in a `500 Internal Server Error`. Am I missing something here, or is this perhaps a bug? Your Environment --------- <!-- Include details of your environment. --> * Operating System: - * Python Version Used: - * When did you install doccano: A few days ago * How did you install doccano (Heroku button etc): Heroku button </issue> <code> [start of backend/config/settings/heroku.py] 1 import django_heroku 2 3 from .base import * # noqa: F401,F403 4 5 django_heroku.settings(locals(), test_runner=False) 6 [end of backend/config/settings/heroku.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/backend/config/settings/heroku.py b/backend/config/settings/heroku.py --- a/backend/config/settings/heroku.py +++ b/backend/config/settings/heroku.py @@ -2,4 +2,4 @@ from .base import * # noqa: F401,F403 -django_heroku.settings(locals(), test_runner=False) +django_heroku.settings(locals(), test_runner=False, staticfiles=False)
{"golden_diff": "diff --git a/backend/config/settings/heroku.py b/backend/config/settings/heroku.py\n--- a/backend/config/settings/heroku.py\n+++ b/backend/config/settings/heroku.py\n@@ -2,4 +2,4 @@\n \n from .base import * # noqa: F401,F403\n \n-django_heroku.settings(locals(), test_runner=False)\n+django_heroku.settings(locals(), test_runner=False, staticfiles=False)\n", "issue": "Cannot access Django admin panel in a Heroku deployment\nHow to reproduce the behaviour\r\n---------\r\nThe FAQ describes how to [create a user via the Django admin panel](https://github.com/doccano/doccano/blob/master/docs/faq.md#how-to-create-a-user) for a locally hosted Doccano. When run locally, I have no problem to reach the admin panel on `http://localhost:8000/admin/`, in Heroku however it is not working.\r\n\r\nI have tried to reach it on\r\n- `https://mydeployment.herokuapp.com/admin/`\r\n- `https://mydeployment.herokuapp.com/admin/login`\r\n- `https://mydeployment.herokuapp.com/admin/login/`\r\n- `http://mydeployment.herokuapp.com/admin/`\r\n\r\nThose urls all result in a `500 Internal Server Error`.\r\nAm I missing something here, or is this perhaps a bug?\r\n\r\nYour Environment\r\n---------\r\n<!-- Include details of your environment. -->\r\n\r\n* Operating System: -\r\n* Python Version Used: -\r\n* When did you install doccano: A few days ago\r\n* How did you install doccano (Heroku button etc): Heroku button\r\n\n", "before_files": [{"content": "import django_heroku\n\nfrom .base import * # noqa: F401,F403\n\ndjango_heroku.settings(locals(), test_runner=False)\n", "path": "backend/config/settings/heroku.py"}]}
822
95
gh_patches_debug_4139
rasdani/github-patches
git_diff
ephios-dev__ephios-364
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add caching As of #278 some settings reside in the database, so we should defintely cache database requests. </issue> <code> [start of ephios/settings.py] 1 import copy 2 import os 3 from email.utils import getaddresses 4 from importlib import metadata 5 6 import environ 7 from django.contrib.messages import constants 8 9 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 10 11 env = environ.Env() 12 # for syntax see https://django-environ.readthedocs.io/en/latest/ 13 environ.Env.read_env(env_file=os.path.join(BASE_DIR, ".env")) 14 15 DATA_DIR = env.str("DATA_DIR", default=os.path.join(BASE_DIR, "data")) 16 if not os.path.exists(DATA_DIR): 17 os.mkdir(DATA_DIR) 18 19 SECRET_KEY = env.str("SECRET_KEY") 20 DEBUG = env.bool("DEBUG") 21 ALLOWED_HOSTS = env.list("ALLOWED_HOSTS") 22 SITE_URL = env.str("SITE_URL") 23 if SITE_URL.endswith("/"): 24 SITE_URL = SITE_URL[:-1] 25 26 if not DEBUG: 27 SESSION_COOKIE_SECURE = True 28 CSRF_COOKIE_SECURE = True 29 X_FRAME_OPTIONS = "DENY" 30 SECURE_CONTENT_TYPE_NOSNIFF = True 31 SECURE_BROWSER_XSS_FILTER = True 32 SECURE_SSL_REDIRECT = True 33 SECURE_HSTS_SECONDS = 3600 34 SECURE_HSTS_INCLUDE_SUBDOMAINS = True 35 SECURE_REFERRER_POLICY = "same-origin" 36 37 # Application definition 38 39 INSTALLED_APPS = [ 40 "django.contrib.admin", 41 "django.contrib.auth", 42 "polymorphic", 43 "django.contrib.contenttypes", 44 "django.contrib.sessions", 45 "django.contrib.messages", 46 "django.contrib.staticfiles", 47 "guardian", 48 "django_select2", 49 "djangoformsetjs", 50 "compressor", 51 "recurrence", 52 "statici18n", 53 "dynamic_preferences.users.apps.UserPreferencesConfig", 54 "crispy_forms", 55 ] 56 57 EPHIOS_CORE_MODULES = [ 58 "ephios.core", 59 "ephios.extra", 60 ] 61 INSTALLED_APPS += EPHIOS_CORE_MODULES 62 63 CORE_PLUGINS = [ 64 "ephios.plugins.basesignup", 65 "ephios.plugins.pages", 66 "ephios.plugins.guests", 67 ] 68 PLUGINS = copy.copy(CORE_PLUGINS) 69 for ep in metadata.entry_points().get("ephios.plugins", []): 70 PLUGINS.append(ep.module) 71 72 INSTALLED_APPS += PLUGINS 73 74 INSTALLED_APPS += ["dynamic_preferences"] # must come after our apps to collect preferences 75 76 MIDDLEWARE = [ 77 "django.middleware.security.SecurityMiddleware", 78 "django.contrib.sessions.middleware.SessionMiddleware", 79 "django.middleware.common.CommonMiddleware", 80 "django.middleware.csrf.CsrfViewMiddleware", 81 "django.contrib.auth.middleware.AuthenticationMiddleware", 82 "django.contrib.messages.middleware.MessageMiddleware", 83 "django.middleware.clickjacking.XFrameOptionsMiddleware", 84 "csp.middleware.CSPMiddleware", 85 ] 86 87 ROOT_URLCONF = "ephios.urls" 88 89 TEMPLATES = [ 90 { 91 "BACKEND": "django.template.backends.django.DjangoTemplates", 92 "DIRS": [os.path.join(BASE_DIR, "ephios/templates")], 93 "APP_DIRS": True, 94 "OPTIONS": { 95 "context_processors": [ 96 "django.template.context_processors.debug", 97 "django.template.context_processors.request", 98 "django.contrib.auth.context_processors.auth", 99 "django.contrib.messages.context_processors.messages", 100 "django.template.context_processors.i18n", 101 "dynamic_preferences.processors.global_preferences", 102 "ephios.core.context.ephios_base_context", 103 ], 104 }, 105 }, 106 ] 107 108 LOCALE_PATHS = (os.path.join(BASE_DIR, "ephios/locale"),) 109 110 WSGI_APPLICATION = "ephios.wsgi.application" 111 112 # Database 113 # https://docs.djangoproject.com/en/3.0/ref/settings/#databases 114 
115 DATABASES = {"default": env.db_url()} 116 117 # Password validation 118 # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators 119 120 AUTH_PASSWORD_VALIDATORS = [ 121 { 122 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", 123 }, 124 { 125 "NAME": "ephios.core.utils.CustomMinimumLengthValidator", 126 }, 127 { 128 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", 129 }, 130 { 131 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", 132 }, 133 ] 134 135 AUTHENTICATION_BACKENDS = ( 136 "django.contrib.auth.backends.ModelBackend", 137 "guardian.backends.ObjectPermissionBackend", 138 ) 139 140 AUTH_USER_MODEL = "core.UserProfile" 141 LOGIN_REDIRECT_URL = "/" 142 PASSWORD_RESET_TIMEOUT = 28 * 24 * 60 * 60 # seconds 143 144 # Internationalization 145 # https://docs.djangoproject.com/en/3.0/topics/i18n/ 146 147 LANGUAGE_CODE = "de" 148 149 TIME_ZONE = "Europe/Berlin" 150 151 USE_I18N = True 152 153 USE_L10N = True 154 155 USE_TZ = True 156 157 # Static files (CSS, JavaScript, Images) 158 # https://docs.djangoproject.com/en/3.0/howto/static-files/ 159 160 STATIC_URL = env.str("STATIC_URL") 161 STATIC_ROOT = env.str("STATIC_ROOT") 162 STATICFILES_DIRS = (os.path.join(BASE_DIR, "ephios/static"),) 163 STATICFILES_FINDERS = ( 164 "django.contrib.staticfiles.finders.FileSystemFinder", 165 "django.contrib.staticfiles.finders.AppDirectoriesFinder", 166 "compressor.finders.CompressorFinder", 167 ) 168 COMPRESS_ENABLED = not DEBUG 169 170 # mail configuration 171 EMAIL_CONFIG = env.email_url("EMAIL_URL") 172 vars().update(EMAIL_CONFIG) 173 DEFAULT_FROM_EMAIL = env.str("DEFAULT_FROM_EMAIL") 174 SERVER_EMAIL = env.str("SERVER_EMAIL") 175 ADMINS = getaddresses([env("ADMINS")]) 176 177 # Guardian configuration 178 ANONYMOUS_USER_NAME = None 179 GUARDIAN_MONKEY_PATCH = False 180 181 # django-select2 182 # Prevent django-select from loading the select2 resources as we want to serve them locally 183 SELECT2_JS = "" 184 SELECT2_CSS = "" 185 SELECT2_I18N_PATH = "" 186 187 # django-debug-toolbar 188 if DEBUG: 189 INSTALLED_APPS.append("django_extensions") 190 INSTALLED_APPS.append("debug_toolbar") 191 MIDDLEWARE.insert(0, "debug_toolbar.middleware.DebugToolbarMiddleware") 192 INTERNAL_IPS = env.str("INTERNAL_IPS") 193 194 # django-csp 195 # Bootstrap requires embedded SVG files loaded via a data URI. This is not ideal, but will only be fixed in 196 # bootstrap v5 or v6. See https://github.com/twbs/bootstrap/issues/25394 for details on the problem and 197 # https://security.stackexchange.com/a/167244 on why allowing data: is considered okay 198 CSP_IMG_SRC = ("'self'", "data:") 199 200 # django-crispy-forms 201 CRISPY_TEMPLATE_PACK = "bootstrap4" 202 203 # django.contrib.messages 204 MESSAGE_TAGS = { 205 constants.DEBUG: "alert-info", 206 constants.INFO: "alert-info", 207 constants.SUCCESS: "alert-success", 208 constants.WARNING: "alert-warning", 209 constants.ERROR: "alert-danger", 210 } 211 212 # PWA 213 PWA_APP_ICONS = [ 214 {"src": "/static/ephios/img/ephios-192x.png", "sizes": "192x192", "purpose": "any maskable"}, 215 {"src": "/static/ephios/img/ephios-512x.png", "sizes": "512x512", "purpose": "any maskable"}, 216 {"src": "/static/ephios/img/ephios-1024x.png", "sizes": "1024x1024", "purpose": "any maskable"}, 217 ] 218 [end of ephios/settings.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ephios/settings.py b/ephios/settings.py --- a/ephios/settings.py +++ b/ephios/settings.py @@ -114,6 +114,12 @@ DATABASES = {"default": env.db_url()} +# Caches +CACHES = {"default": env.cache_url(default="locmemcache://")} +SESSION_ENGINE = "django.contrib.sessions.backends.cached_db" +DJANGO_REDIS_IGNORE_EXCEPTIONS = True +DJANGO_REDIS_LOG_IGNORED_EXCEPTIONS = True + # Password validation # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
{"golden_diff": "diff --git a/ephios/settings.py b/ephios/settings.py\n--- a/ephios/settings.py\n+++ b/ephios/settings.py\n@@ -114,6 +114,12 @@\n \n DATABASES = {\"default\": env.db_url()}\n \n+# Caches\n+CACHES = {\"default\": env.cache_url(default=\"locmemcache://\")}\n+SESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n+DJANGO_REDIS_IGNORE_EXCEPTIONS = True\n+DJANGO_REDIS_LOG_IGNORED_EXCEPTIONS = True\n+\n # Password validation\n # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators\n", "issue": "Add caching\nAs of #278 some settings reside in the database, so we should defintely cache database requests.\n", "before_files": [{"content": "import copy\nimport os\nfrom email.utils import getaddresses\nfrom importlib import metadata\n\nimport environ\nfrom django.contrib.messages import constants\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nenv = environ.Env()\n# for syntax see https://django-environ.readthedocs.io/en/latest/\nenviron.Env.read_env(env_file=os.path.join(BASE_DIR, \".env\"))\n\nDATA_DIR = env.str(\"DATA_DIR\", default=os.path.join(BASE_DIR, \"data\"))\nif not os.path.exists(DATA_DIR):\n os.mkdir(DATA_DIR)\n\nSECRET_KEY = env.str(\"SECRET_KEY\")\nDEBUG = env.bool(\"DEBUG\")\nALLOWED_HOSTS = env.list(\"ALLOWED_HOSTS\")\nSITE_URL = env.str(\"SITE_URL\")\nif SITE_URL.endswith(\"/\"):\n SITE_URL = SITE_URL[:-1]\n\nif not DEBUG:\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n X_FRAME_OPTIONS = \"DENY\"\n SECURE_CONTENT_TYPE_NOSNIFF = True\n SECURE_BROWSER_XSS_FILTER = True\n SECURE_SSL_REDIRECT = True\n SECURE_HSTS_SECONDS = 3600\n SECURE_HSTS_INCLUDE_SUBDOMAINS = True\n SECURE_REFERRER_POLICY = \"same-origin\"\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"polymorphic\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"guardian\",\n \"django_select2\",\n \"djangoformsetjs\",\n \"compressor\",\n \"recurrence\",\n \"statici18n\",\n \"dynamic_preferences.users.apps.UserPreferencesConfig\",\n \"crispy_forms\",\n]\n\nEPHIOS_CORE_MODULES = [\n \"ephios.core\",\n \"ephios.extra\",\n]\nINSTALLED_APPS += EPHIOS_CORE_MODULES\n\nCORE_PLUGINS = [\n \"ephios.plugins.basesignup\",\n \"ephios.plugins.pages\",\n \"ephios.plugins.guests\",\n]\nPLUGINS = copy.copy(CORE_PLUGINS)\nfor ep in metadata.entry_points().get(\"ephios.plugins\", []):\n PLUGINS.append(ep.module)\n\nINSTALLED_APPS += PLUGINS\n\nINSTALLED_APPS += [\"dynamic_preferences\"] # must come after our apps to collect preferences\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"csp.middleware.CSPMiddleware\",\n]\n\nROOT_URLCONF = \"ephios.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"ephios/templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n 
\"django.contrib.messages.context_processors.messages\",\n \"django.template.context_processors.i18n\",\n \"dynamic_preferences.processors.global_preferences\",\n \"ephios.core.context.ephios_base_context\",\n ],\n },\n },\n]\n\nLOCALE_PATHS = (os.path.join(BASE_DIR, \"ephios/locale\"),)\n\nWSGI_APPLICATION = \"ephios.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.0/ref/settings/#databases\n\nDATABASES = {\"default\": env.db_url()}\n\n# Password validation\n# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"ephios.core.utils.CustomMinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\nAUTHENTICATION_BACKENDS = (\n \"django.contrib.auth.backends.ModelBackend\",\n \"guardian.backends.ObjectPermissionBackend\",\n)\n\nAUTH_USER_MODEL = \"core.UserProfile\"\nLOGIN_REDIRECT_URL = \"/\"\nPASSWORD_RESET_TIMEOUT = 28 * 24 * 60 * 60 # seconds\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.0/topics/i18n/\n\nLANGUAGE_CODE = \"de\"\n\nTIME_ZONE = \"Europe/Berlin\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.0/howto/static-files/\n\nSTATIC_URL = env.str(\"STATIC_URL\")\nSTATIC_ROOT = env.str(\"STATIC_ROOT\")\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"ephios/static\"),)\nSTATICFILES_FINDERS = (\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n \"compressor.finders.CompressorFinder\",\n)\nCOMPRESS_ENABLED = not DEBUG\n\n# mail configuration\nEMAIL_CONFIG = env.email_url(\"EMAIL_URL\")\nvars().update(EMAIL_CONFIG)\nDEFAULT_FROM_EMAIL = env.str(\"DEFAULT_FROM_EMAIL\")\nSERVER_EMAIL = env.str(\"SERVER_EMAIL\")\nADMINS = getaddresses([env(\"ADMINS\")])\n\n# Guardian configuration\nANONYMOUS_USER_NAME = None\nGUARDIAN_MONKEY_PATCH = False\n\n# django-select2\n# Prevent django-select from loading the select2 resources as we want to serve them locally\nSELECT2_JS = \"\"\nSELECT2_CSS = \"\"\nSELECT2_I18N_PATH = \"\"\n\n# django-debug-toolbar\nif DEBUG:\n INSTALLED_APPS.append(\"django_extensions\")\n INSTALLED_APPS.append(\"debug_toolbar\")\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\")\n INTERNAL_IPS = env.str(\"INTERNAL_IPS\")\n\n# django-csp\n# Bootstrap requires embedded SVG files loaded via a data URI. This is not ideal, but will only be fixed in\n# bootstrap v5 or v6. 
See https://github.com/twbs/bootstrap/issues/25394 for details on the problem and\n# https://security.stackexchange.com/a/167244 on why allowing data: is considered okay\nCSP_IMG_SRC = (\"'self'\", \"data:\")\n\n# django-crispy-forms\nCRISPY_TEMPLATE_PACK = \"bootstrap4\"\n\n# django.contrib.messages\nMESSAGE_TAGS = {\n constants.DEBUG: \"alert-info\",\n constants.INFO: \"alert-info\",\n constants.SUCCESS: \"alert-success\",\n constants.WARNING: \"alert-warning\",\n constants.ERROR: \"alert-danger\",\n}\n\n# PWA\nPWA_APP_ICONS = [\n {\"src\": \"/static/ephios/img/ephios-192x.png\", \"sizes\": \"192x192\", \"purpose\": \"any maskable\"},\n {\"src\": \"/static/ephios/img/ephios-512x.png\", \"sizes\": \"512x512\", \"purpose\": \"any maskable\"},\n {\"src\": \"/static/ephios/img/ephios-1024x.png\", \"sizes\": \"1024x1024\", \"purpose\": \"any maskable\"},\n]\n", "path": "ephios/settings.py"}]}
2717
141
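For context on the caching setup the patch above introduces, here is a minimal standalone sketch of how django-environ resolves a cache URL into a Django `CACHES` entry. It assumes django-environ is installed; the `CACHE_URL` value mentioned in the comments is hypothetical and not taken from ephios.

```python
# Standalone sketch: django-environ maps a cache URL onto a Django CACHES dict.
# With no CACHE_URL environment variable set, the "locmemcache://" default
# selects Django's in-process LocMemCache backend.
import environ

env = environ.Env()

# Parses os.environ["CACHE_URL"] if present, otherwise falls back to the default.
default_cache = env.cache_url(default="locmemcache://")
print(default_cache["BACKEND"])  # django.core.cache.backends.locmem.LocMemCache

# A hypothetical Redis deployment could instead export, for example:
#   CACHE_URL=rediscache://127.0.0.1:6379/1
# and the same call would return the django-redis backend configuration,
# which is why the patch also sets the DJANGO_REDIS_* exception options.
```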
gh_patches_debug_32353
rasdani/github-patches
git_diff
pypi__warehouse-6342
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Change username & API token prefix, to make Travis auth easier Just wanted to share a potential pain point for folks using the new PyPI API tokens for authentication via a travis.yaml. The @token username needs to be wrapped in quotes and the `:` after `pypi` needs to be escaped to work, otherwise you hit a 403 error (thanks for the help tracking this down @ewdurbin). If you're using the environment variables through Travis' UI, the following works: ``` deploy: provider: pypi user: "@token" # quotes required for travis # server: https://test.pypi.org/legacy/ # uncomment to do a test deploy password: secure: $PASSWORD # stored in travis env var, with `:` after `pypi` escaped (pypi\:) on: branch: master skip_existing: true ``` If you're encrypting the token via the command line, you can just wrap your token in quotes: ```travis encrypt "<your-pypi_token>" --add deploy.password [--com]``` </issue> <code> [start of warehouse/macaroons/services.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 import datetime 14 import json 15 import uuid 16 17 import pymacaroons 18 19 from sqlalchemy.orm import joinedload 20 from sqlalchemy.orm.exc import NoResultFound 21 from zope.interface import implementer 22 23 from warehouse.accounts.models import User 24 from warehouse.macaroons.caveats import InvalidMacaroon, Verifier 25 from warehouse.macaroons.interfaces import IMacaroonService 26 from warehouse.macaroons.models import Macaroon 27 28 29 @implementer(IMacaroonService) 30 class DatabaseMacaroonService: 31 def __init__(self, db_session): 32 self.db = db_session 33 34 def _extract_raw_macaroon(self, raw_macaroon): 35 """ 36 Returns the base64-encoded macaroon component of a PyPI macaroon, 37 dropping the prefix. 38 39 Returns None if the macaroon is None, has no prefix, or has the 40 wrong prefix. 41 """ 42 if raw_macaroon is None: 43 return None 44 45 try: 46 prefix, raw_macaroon = raw_macaroon.split(":", 1) 47 except ValueError: 48 return None 49 50 if prefix != "pypi": 51 return None 52 53 return raw_macaroon 54 55 def find_macaroon(self, macaroon_id): 56 """ 57 Returns a macaroon model from the DB by its identifier. 58 Returns None if no macaroon has the given ID. 59 """ 60 try: 61 dm = ( 62 self.db.query(Macaroon) 63 .options(joinedload("user")) 64 .filter(Macaroon.id == uuid.UUID(macaroon_id)) 65 .one() 66 ) 67 except NoResultFound: 68 return None 69 70 return dm 71 72 def find_userid(self, raw_macaroon): 73 """ 74 Returns the id of the user associated with the given raw (serialized) 75 macaroon. 
76 """ 77 raw_macaroon = self._extract_raw_macaroon(raw_macaroon) 78 if raw_macaroon is None: 79 return None 80 81 m = pymacaroons.Macaroon.deserialize(raw_macaroon) 82 dm = self.find_macaroon(m.identifier.decode()) 83 84 if dm is None: 85 return None 86 87 return dm.user.id 88 89 def verify(self, raw_macaroon, context, principals, permission): 90 """ 91 Returns True if the given raw (serialized) macaroon is 92 valid for the context, principals, and requested permission. 93 94 Raises InvalidMacaroon if the macaroon is not valid. 95 """ 96 raw_macaroon = self._extract_raw_macaroon(raw_macaroon) 97 if raw_macaroon is None: 98 raise InvalidMacaroon("malformed or nonexistent macaroon") 99 100 m = pymacaroons.Macaroon.deserialize(raw_macaroon) 101 dm = self.find_macaroon(m.identifier.decode()) 102 103 if dm is None: 104 raise InvalidMacaroon("deleted or nonexistent macaroon") 105 106 verifier = Verifier(m, context, principals, permission) 107 if verifier.verify(dm.key): 108 dm.last_used = datetime.datetime.now() 109 return True 110 111 raise InvalidMacaroon("invalid macaroon") 112 113 def create_macaroon(self, location, user_id, description, caveats): 114 """ 115 Returns a tuple of a new raw (serialized) macaroon and its DB model. 116 The description provided is not embedded into the macaroon, only stored 117 in the DB model. 118 """ 119 user = self.db.query(User).filter(User.id == user_id).one() 120 121 dm = Macaroon(user=user, description=description, caveats=caveats) 122 self.db.add(dm) 123 self.db.flush() 124 125 m = pymacaroons.Macaroon( 126 location=location, 127 identifier=str(dm.id), 128 key=dm.key, 129 version=pymacaroons.MACAROON_V2, 130 ) 131 m.add_first_party_caveat(json.dumps(caveats)) 132 serialized_macaroon = f"pypi:{m.serialize()}" 133 return serialized_macaroon, dm 134 135 def delete_macaroon(self, macaroon_id): 136 """ 137 Deletes a macaroon from the DB by its identifier. 138 """ 139 dm = self.find_macaroon(macaroon_id) 140 self.db.delete(dm) 141 self.db.flush() 142 143 def get_macaroon_by_description(self, user_id, description): 144 """ 145 Returns a macaroon model from the DB with the given description, 146 if one exists for the given user. 147 148 Returns None if the user doesn't have a macaroon with this description. 149 """ 150 try: 151 dm = ( 152 self.db.query(Macaroon) 153 .options(joinedload("user")) 154 .filter(Macaroon.description == description) 155 .filter(Macaroon.user_id == user_id) 156 .one() 157 ) 158 except NoResultFound: 159 return None 160 161 return dm 162 163 164 def database_macaroon_factory(context, request): 165 return DatabaseMacaroonService(request.db) 166 [end of warehouse/macaroons/services.py] [start of warehouse/macaroons/auth_policy.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 
12 13 import base64 14 15 from pyramid.authentication import CallbackAuthenticationPolicy 16 from pyramid.interfaces import IAuthenticationPolicy, IAuthorizationPolicy 17 from pyramid.security import Denied 18 from pyramid.threadlocal import get_current_request 19 from zope.interface import implementer 20 21 from warehouse.cache.http import add_vary_callback 22 from warehouse.macaroons.interfaces import IMacaroonService 23 from warehouse.macaroons.services import InvalidMacaroon 24 25 26 def _extract_basic_macaroon(auth): 27 """ 28 A helper function for extracting a macaroon from a 29 HTTP Basic Authentication-style header. 30 31 Returns None if the header doesn't contain a structurally 32 valid macaroon, or the candidate (not yet verified) macaroon 33 in a serialized form. 34 """ 35 try: 36 authorization = base64.b64decode(auth).decode() 37 auth_method, _, auth = authorization.partition(":") 38 except ValueError: 39 return None 40 41 if auth_method != "@token": 42 return None 43 44 return auth 45 46 47 def _extract_http_macaroon(request): 48 """ 49 A helper function for the extraction of HTTP Macaroon from a given request. 50 Returns either a None if no macaroon could be found, or the string 51 that represents our serialized macaroon. 52 """ 53 authorization = request.headers.get("Authorization") 54 if not authorization: 55 return None 56 57 try: 58 auth_method, auth = authorization.split(" ", 1) 59 except ValueError: 60 return None 61 62 if auth_method.lower() == "basic": 63 return _extract_basic_macaroon(auth) 64 elif auth_method.lower() == "token": 65 return auth 66 67 return None 68 69 70 @implementer(IAuthenticationPolicy) 71 class MacaroonAuthenticationPolicy(CallbackAuthenticationPolicy): 72 def __init__(self, callback=None): 73 self.callback = callback 74 75 def unauthenticated_userid(self, request): 76 # If we're calling into this API on a request, then we want to register 77 # a callback which will ensure that the response varies based on the 78 # Authorization header. 79 request.add_response_callback(add_vary_callback("Authorization")) 80 81 # We need to extract our Macaroon from the request. 82 macaroon = _extract_http_macaroon(request) 83 if macaroon is None: 84 return None 85 86 # Check to see if our Macaroon exists in the database, and if so 87 # fetch the user that is associated with it. 88 macaroon_service = request.find_service(IMacaroonService, context=None) 89 userid = macaroon_service.find_userid(macaroon) 90 if userid is not None: 91 return str(userid) 92 93 def remember(self, request, userid, **kw): 94 # This is a NO-OP because our Macaroon header policy doesn't allow 95 # the ability for authentication to "remember" the user id. This 96 # assumes it has been configured in clients somewhere out of band. 97 return [] 98 99 def forget(self, request): 100 # This is a NO-OP because our Macaroon header policy doesn't allow 101 # the ability for authentication to "forget" the user id. This 102 # assumes it has been configured in clients somewhere out of band. 103 return [] 104 105 106 @implementer(IAuthorizationPolicy) 107 class MacaroonAuthorizationPolicy: 108 def __init__(self, policy): 109 self.policy = policy 110 111 def permits(self, context, principals, permission): 112 # The Pyramid API doesn't let us access the request here, so we have to pull it 113 # out of the thread local instead. 114 # TODO: Work with Pyramid devs to figure out if there is a better way to support 115 # the worklow we are using here or not. 
116 request = get_current_request() 117 118 # Our request could possibly be a None, if there isn't an active request, in 119 # that case we're going to always deny, because without a request, we can't 120 # determine if this request is authorized or not. 121 if request is None: 122 return Denied("There was no active request.") 123 124 # Re-extract our Macaroon from the request, it sucks to have to do this work 125 # twice, but I believe it is inevitable unless we pass the Macaroon back as 126 # a principal-- which doesn't seem to be the right fit for it. 127 macaroon = _extract_http_macaroon(request) 128 129 # This logic will only happen on requests that are being authenticated with 130 # Macaroons. Any other request will just fall back to the standard Authorization 131 # policy. 132 if macaroon is not None: 133 macaroon_service = request.find_service(IMacaroonService, context=None) 134 135 try: 136 macaroon_service.verify(macaroon, context, principals, permission) 137 except InvalidMacaroon as exc: 138 return Denied(f"The supplied token was invalid: {str(exc)!r}") 139 140 # If our Macaroon is verified, then we'll pass this request to our underlying 141 # Authorization policy, so it can handle its own authorization logic on 142 # the prinicpal. 143 return self.policy.permits(context, principals, permission) 144 145 def principals_allowed_by_permission(self, context, permission): 146 # We just dispatch this, because Macaroons don't restrict what principals are 147 # allowed by a particular permission, they just restrict specific requests 148 # to not have that permission. 149 return self.policy.principals_allowed_by_permission(context, permission) 150 [end of warehouse/macaroons/auth_policy.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/warehouse/macaroons/auth_policy.py b/warehouse/macaroons/auth_policy.py --- a/warehouse/macaroons/auth_policy.py +++ b/warehouse/macaroons/auth_policy.py @@ -38,7 +38,8 @@ except ValueError: return None - if auth_method != "@token": + # TODO: Remove @token as an acceptable token username (GH-6345) + if auth_method != "@token" and auth_method != "__token__": return None return auth diff --git a/warehouse/macaroons/services.py b/warehouse/macaroons/services.py --- a/warehouse/macaroons/services.py +++ b/warehouse/macaroons/services.py @@ -31,7 +31,7 @@ def __init__(self, db_session): self.db = db_session - def _extract_raw_macaroon(self, raw_macaroon): + def _extract_raw_macaroon(self, prefixed_macaroon): """ Returns the base64-encoded macaroon component of a PyPI macaroon, dropping the prefix. @@ -39,13 +39,13 @@ Returns None if the macaroon is None, has no prefix, or has the wrong prefix. """ - if raw_macaroon is None: + if prefixed_macaroon is None: return None - try: - prefix, raw_macaroon = raw_macaroon.split(":", 1) - except ValueError: - return None + prefix, split, raw_macaroon = prefixed_macaroon.partition("-") + # TODO: Remove ':' as an acceptable delimiter for tokens (GH-6345) + if prefix != "pypi" or not split: + prefix, _, raw_macaroon = prefixed_macaroon.partition(":") if prefix != "pypi": return None @@ -129,7 +129,7 @@ version=pymacaroons.MACAROON_V2, ) m.add_first_party_caveat(json.dumps(caveats)) - serialized_macaroon = f"pypi:{m.serialize()}" + serialized_macaroon = f"pypi-{m.serialize()}" return serialized_macaroon, dm def delete_macaroon(self, macaroon_id):
{"golden_diff": "diff --git a/warehouse/macaroons/auth_policy.py b/warehouse/macaroons/auth_policy.py\n--- a/warehouse/macaroons/auth_policy.py\n+++ b/warehouse/macaroons/auth_policy.py\n@@ -38,7 +38,8 @@\n except ValueError:\n return None\n \n- if auth_method != \"@token\":\n+ # TODO: Remove @token as an acceptable token username (GH-6345)\n+ if auth_method != \"@token\" and auth_method != \"__token__\":\n return None\n \n return auth\ndiff --git a/warehouse/macaroons/services.py b/warehouse/macaroons/services.py\n--- a/warehouse/macaroons/services.py\n+++ b/warehouse/macaroons/services.py\n@@ -31,7 +31,7 @@\n def __init__(self, db_session):\n self.db = db_session\n \n- def _extract_raw_macaroon(self, raw_macaroon):\n+ def _extract_raw_macaroon(self, prefixed_macaroon):\n \"\"\"\n Returns the base64-encoded macaroon component of a PyPI macaroon,\n dropping the prefix.\n@@ -39,13 +39,13 @@\n Returns None if the macaroon is None, has no prefix, or has the\n wrong prefix.\n \"\"\"\n- if raw_macaroon is None:\n+ if prefixed_macaroon is None:\n return None\n \n- try:\n- prefix, raw_macaroon = raw_macaroon.split(\":\", 1)\n- except ValueError:\n- return None\n+ prefix, split, raw_macaroon = prefixed_macaroon.partition(\"-\")\n+ # TODO: Remove ':' as an acceptable delimiter for tokens (GH-6345)\n+ if prefix != \"pypi\" or not split:\n+ prefix, _, raw_macaroon = prefixed_macaroon.partition(\":\")\n \n if prefix != \"pypi\":\n return None\n@@ -129,7 +129,7 @@\n version=pymacaroons.MACAROON_V2,\n )\n m.add_first_party_caveat(json.dumps(caveats))\n- serialized_macaroon = f\"pypi:{m.serialize()}\"\n+ serialized_macaroon = f\"pypi-{m.serialize()}\"\n return serialized_macaroon, dm\n \n def delete_macaroon(self, macaroon_id):\n", "issue": "Change username & API token prefix, to make Travis auth easier\nJust wanted to share a potential pain point for folks using the new PyPI API tokens for authentication via a travis.yaml. 
The @token username needs to be wrapped in quotes and the `:` after `pypi` needs to be escaped to work, otherwise you hit a 403 error (thanks for the help tracking this down @ewdurbin).\r\n\r\nIf you're using the environment variables through Travis' UI, the following works:\r\n```\r\ndeploy:\r\n provider: pypi\r\n user: \"@token\" # quotes required for travis\r\n # server: https://test.pypi.org/legacy/ # uncomment to do a test deploy\r\n password:\r\n secure: $PASSWORD # stored in travis env var, with `:` after `pypi` escaped (pypi\\:)\r\n on:\r\n branch: master\r\n skip_existing: true\r\n```\r\n\r\nIf you're encrypting the token via the command line, you can just wrap your token in quotes:\r\n```travis encrypt \"<your-pypi_token>\" --add deploy.password [--com]```\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport json\nimport uuid\n\nimport pymacaroons\n\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom zope.interface import implementer\n\nfrom warehouse.accounts.models import User\nfrom warehouse.macaroons.caveats import InvalidMacaroon, Verifier\nfrom warehouse.macaroons.interfaces import IMacaroonService\nfrom warehouse.macaroons.models import Macaroon\n\n\n@implementer(IMacaroonService)\nclass DatabaseMacaroonService:\n def __init__(self, db_session):\n self.db = db_session\n\n def _extract_raw_macaroon(self, raw_macaroon):\n \"\"\"\n Returns the base64-encoded macaroon component of a PyPI macaroon,\n dropping the prefix.\n\n Returns None if the macaroon is None, has no prefix, or has the\n wrong prefix.\n \"\"\"\n if raw_macaroon is None:\n return None\n\n try:\n prefix, raw_macaroon = raw_macaroon.split(\":\", 1)\n except ValueError:\n return None\n\n if prefix != \"pypi\":\n return None\n\n return raw_macaroon\n\n def find_macaroon(self, macaroon_id):\n \"\"\"\n Returns a macaroon model from the DB by its identifier.\n Returns None if no macaroon has the given ID.\n \"\"\"\n try:\n dm = (\n self.db.query(Macaroon)\n .options(joinedload(\"user\"))\n .filter(Macaroon.id == uuid.UUID(macaroon_id))\n .one()\n )\n except NoResultFound:\n return None\n\n return dm\n\n def find_userid(self, raw_macaroon):\n \"\"\"\n Returns the id of the user associated with the given raw (serialized)\n macaroon.\n \"\"\"\n raw_macaroon = self._extract_raw_macaroon(raw_macaroon)\n if raw_macaroon is None:\n return None\n\n m = pymacaroons.Macaroon.deserialize(raw_macaroon)\n dm = self.find_macaroon(m.identifier.decode())\n\n if dm is None:\n return None\n\n return dm.user.id\n\n def verify(self, raw_macaroon, context, principals, permission):\n \"\"\"\n Returns True if the given raw (serialized) macaroon is\n valid for the context, principals, and requested permission.\n\n Raises InvalidMacaroon if the macaroon is not valid.\n \"\"\"\n raw_macaroon = self._extract_raw_macaroon(raw_macaroon)\n if raw_macaroon is None:\n raise InvalidMacaroon(\"malformed or nonexistent macaroon\")\n\n m = 
pymacaroons.Macaroon.deserialize(raw_macaroon)\n dm = self.find_macaroon(m.identifier.decode())\n\n if dm is None:\n raise InvalidMacaroon(\"deleted or nonexistent macaroon\")\n\n verifier = Verifier(m, context, principals, permission)\n if verifier.verify(dm.key):\n dm.last_used = datetime.datetime.now()\n return True\n\n raise InvalidMacaroon(\"invalid macaroon\")\n\n def create_macaroon(self, location, user_id, description, caveats):\n \"\"\"\n Returns a tuple of a new raw (serialized) macaroon and its DB model.\n The description provided is not embedded into the macaroon, only stored\n in the DB model.\n \"\"\"\n user = self.db.query(User).filter(User.id == user_id).one()\n\n dm = Macaroon(user=user, description=description, caveats=caveats)\n self.db.add(dm)\n self.db.flush()\n\n m = pymacaroons.Macaroon(\n location=location,\n identifier=str(dm.id),\n key=dm.key,\n version=pymacaroons.MACAROON_V2,\n )\n m.add_first_party_caveat(json.dumps(caveats))\n serialized_macaroon = f\"pypi:{m.serialize()}\"\n return serialized_macaroon, dm\n\n def delete_macaroon(self, macaroon_id):\n \"\"\"\n Deletes a macaroon from the DB by its identifier.\n \"\"\"\n dm = self.find_macaroon(macaroon_id)\n self.db.delete(dm)\n self.db.flush()\n\n def get_macaroon_by_description(self, user_id, description):\n \"\"\"\n Returns a macaroon model from the DB with the given description,\n if one exists for the given user.\n\n Returns None if the user doesn't have a macaroon with this description.\n \"\"\"\n try:\n dm = (\n self.db.query(Macaroon)\n .options(joinedload(\"user\"))\n .filter(Macaroon.description == description)\n .filter(Macaroon.user_id == user_id)\n .one()\n )\n except NoResultFound:\n return None\n\n return dm\n\n\ndef database_macaroon_factory(context, request):\n return DatabaseMacaroonService(request.db)\n", "path": "warehouse/macaroons/services.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\n\nfrom pyramid.authentication import CallbackAuthenticationPolicy\nfrom pyramid.interfaces import IAuthenticationPolicy, IAuthorizationPolicy\nfrom pyramid.security import Denied\nfrom pyramid.threadlocal import get_current_request\nfrom zope.interface import implementer\n\nfrom warehouse.cache.http import add_vary_callback\nfrom warehouse.macaroons.interfaces import IMacaroonService\nfrom warehouse.macaroons.services import InvalidMacaroon\n\n\ndef _extract_basic_macaroon(auth):\n \"\"\"\n A helper function for extracting a macaroon from a\n HTTP Basic Authentication-style header.\n\n Returns None if the header doesn't contain a structurally\n valid macaroon, or the candidate (not yet verified) macaroon\n in a serialized form.\n \"\"\"\n try:\n authorization = base64.b64decode(auth).decode()\n auth_method, _, auth = authorization.partition(\":\")\n except ValueError:\n return None\n\n if auth_method != \"@token\":\n return None\n\n return auth\n\n\ndef _extract_http_macaroon(request):\n \"\"\"\n A helper function for the extraction of HTTP Macaroon from a given request.\n 
Returns either a None if no macaroon could be found, or the string\n that represents our serialized macaroon.\n \"\"\"\n authorization = request.headers.get(\"Authorization\")\n if not authorization:\n return None\n\n try:\n auth_method, auth = authorization.split(\" \", 1)\n except ValueError:\n return None\n\n if auth_method.lower() == \"basic\":\n return _extract_basic_macaroon(auth)\n elif auth_method.lower() == \"token\":\n return auth\n\n return None\n\n\n@implementer(IAuthenticationPolicy)\nclass MacaroonAuthenticationPolicy(CallbackAuthenticationPolicy):\n def __init__(self, callback=None):\n self.callback = callback\n\n def unauthenticated_userid(self, request):\n # If we're calling into this API on a request, then we want to register\n # a callback which will ensure that the response varies based on the\n # Authorization header.\n request.add_response_callback(add_vary_callback(\"Authorization\"))\n\n # We need to extract our Macaroon from the request.\n macaroon = _extract_http_macaroon(request)\n if macaroon is None:\n return None\n\n # Check to see if our Macaroon exists in the database, and if so\n # fetch the user that is associated with it.\n macaroon_service = request.find_service(IMacaroonService, context=None)\n userid = macaroon_service.find_userid(macaroon)\n if userid is not None:\n return str(userid)\n\n def remember(self, request, userid, **kw):\n # This is a NO-OP because our Macaroon header policy doesn't allow\n # the ability for authentication to \"remember\" the user id. This\n # assumes it has been configured in clients somewhere out of band.\n return []\n\n def forget(self, request):\n # This is a NO-OP because our Macaroon header policy doesn't allow\n # the ability for authentication to \"forget\" the user id. This\n # assumes it has been configured in clients somewhere out of band.\n return []\n\n\n@implementer(IAuthorizationPolicy)\nclass MacaroonAuthorizationPolicy:\n def __init__(self, policy):\n self.policy = policy\n\n def permits(self, context, principals, permission):\n # The Pyramid API doesn't let us access the request here, so we have to pull it\n # out of the thread local instead.\n # TODO: Work with Pyramid devs to figure out if there is a better way to support\n # the worklow we are using here or not.\n request = get_current_request()\n\n # Our request could possibly be a None, if there isn't an active request, in\n # that case we're going to always deny, because without a request, we can't\n # determine if this request is authorized or not.\n if request is None:\n return Denied(\"There was no active request.\")\n\n # Re-extract our Macaroon from the request, it sucks to have to do this work\n # twice, but I believe it is inevitable unless we pass the Macaroon back as\n # a principal-- which doesn't seem to be the right fit for it.\n macaroon = _extract_http_macaroon(request)\n\n # This logic will only happen on requests that are being authenticated with\n # Macaroons. 
Any other request will just fall back to the standard Authorization\n # policy.\n if macaroon is not None:\n macaroon_service = request.find_service(IMacaroonService, context=None)\n\n try:\n macaroon_service.verify(macaroon, context, principals, permission)\n except InvalidMacaroon as exc:\n return Denied(f\"The supplied token was invalid: {str(exc)!r}\")\n\n # If our Macaroon is verified, then we'll pass this request to our underlying\n # Authorization policy, so it can handle its own authorization logic on\n # the prinicpal.\n return self.policy.permits(context, principals, permission)\n\n def principals_allowed_by_permission(self, context, permission):\n # We just dispatch this, because Macaroons don't restrict what principals are\n # allowed by a particular permission, they just restrict specific requests\n # to not have that permission.\n return self.policy.principals_allowed_by_permission(context, permission)\n", "path": "warehouse/macaroons/auth_policy.py"}]}
4031
522
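As a point of reference, the prefix handling added by the patch above can be sketched as a standalone function. The name `extract_raw_macaroon` and the sample token strings below are illustrative only, not Warehouse code.

```python
# Standalone sketch: accept both the new "pypi-<token>" form and the legacy
# "pypi:<token>" form, returning the serialized macaroon body or None.
from typing import Optional


def extract_raw_macaroon(prefixed_macaroon: Optional[str]) -> Optional[str]:
    if prefixed_macaroon is None:
        return None
    prefix, sep, raw_macaroon = prefixed_macaroon.partition("-")
    if prefix != "pypi" or not sep:
        # Fall back to the colon-delimited form still accepted for older tokens.
        prefix, _, raw_macaroon = prefixed_macaroon.partition(":")
    return raw_macaroon if prefix == "pypi" else None


assert extract_raw_macaroon("pypi-AgEIcHlwaS5vcmc") == "AgEIcHlwaS5vcmc"
assert extract_raw_macaroon("pypi:AgEIcHlwaS5vcmc") == "AgEIcHlwaS5vcmc"
assert extract_raw_macaroon("not-a-pypi-token") is None
```

Switching the delimiter from ":" to "-" appears to be what sidesteps the Travis escaping problem described in the issue, since the secret no longer contains a colon that Basic-auth-style parsing or YAML tooling would need escaped.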
gh_patches_debug_30592
rasdani/github-patches
git_diff
mne-tools__mne-python-4380
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove deprecated imp module Currently, `mne/commands/utils.py` still uses the deprecated `imp` module, which has long been replaced with `importlib`. According to [this answer on SO](https://stackoverflow.com/a/67692/1112283), the current solution works only on Python 3.5/3.6, and there is a (deprecated) alternative for Python 3.3/3.4. All versions < 3.3 need to use `imp`. How should this be handled in MNE? </issue> <code> [start of mne/commands/utils.py] 1 """Some utility functions for commands (e.g. for cmdline handling).""" 2 3 # Authors: Yaroslav Halchenko <debian@onerussian.com> 4 # 5 # License: BSD (3-clause) 6 7 import imp 8 import os 9 import re 10 from optparse import OptionParser 11 12 import mne 13 14 15 def get_optparser(cmdpath, usage=None): 16 """Create OptionParser with cmd specific settings (e.g. prog value).""" 17 command = os.path.basename(cmdpath) 18 if re.match('mne_(.*).py', command): 19 command = command[4:-3] 20 elif re.match('mne_(.*).pyc', command): 21 command = command[4:-4] 22 23 # Fetch description 24 if cmdpath.endswith('.pyc'): 25 mod = imp.load_compiled('__temp', cmdpath) 26 else: 27 mod = imp.load_source('__temp', cmdpath) 28 if mod.__doc__: 29 doc, description, epilog = mod.__doc__, None, None 30 31 doc_lines = doc.split('\n') 32 description = doc_lines[0] 33 if len(doc_lines) > 1: 34 epilog = '\n'.join(doc_lines[1:]) 35 36 # monkey patch OptionParser to not wrap epilog 37 OptionParser.format_epilog = lambda self, formatter: self.epilog 38 parser = OptionParser(prog="mne %s" % command, 39 version=mne.__version__, 40 description=description, 41 epilog=epilog, usage=usage) 42 43 return parser 44 [end of mne/commands/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mne/commands/utils.py b/mne/commands/utils.py --- a/mne/commands/utils.py +++ b/mne/commands/utils.py @@ -4,7 +4,7 @@ # # License: BSD (3-clause) -import imp +import sys import os import re from optparse import OptionParser @@ -12,6 +12,42 @@ import mne +def load_module(name, path): + """Load module from .py/.pyc file. + + Parameters + ---------- + name : str + Name of the module. + path : str + Path to .py/.pyc file. + + Returns + ------- + mod : module + Imported module. + """ + if sys.version_info < (3, 3): + import imp + if path.endswith('.pyc'): + return imp.load_compiled(name, path) + else: + return imp.load_source(name, path) + elif sys.version_info < (3, 5): + if path.endswith('.pyc'): + from importlib.machinery import SourcelessFileLoader + return SourcelessFileLoader(name, path).load_module() + else: + from importlib.machinery import SourceFileLoader + return SourceFileLoader(name, path).load_module() + else: # Python 3.5 or greater + from importlib.util import spec_from_file_location, module_from_spec + spec = spec_from_file_location(name, path) + mod = module_from_spec(spec) + spec.loader.exec_module(mod) + return mod + + def get_optparser(cmdpath, usage=None): """Create OptionParser with cmd specific settings (e.g. prog value).""" command = os.path.basename(cmdpath) @@ -21,10 +57,7 @@ command = command[4:-4] # Fetch description - if cmdpath.endswith('.pyc'): - mod = imp.load_compiled('__temp', cmdpath) - else: - mod = imp.load_source('__temp', cmdpath) + mod = load_module('__temp', cmdpath) if mod.__doc__: doc, description, epilog = mod.__doc__, None, None
{"golden_diff": "diff --git a/mne/commands/utils.py b/mne/commands/utils.py\n--- a/mne/commands/utils.py\n+++ b/mne/commands/utils.py\n@@ -4,7 +4,7 @@\n #\n # License: BSD (3-clause)\n \n-import imp\n+import sys\n import os\n import re\n from optparse import OptionParser\n@@ -12,6 +12,42 @@\n import mne\n \n \n+def load_module(name, path):\n+ \"\"\"Load module from .py/.pyc file.\n+\n+ Parameters\n+ ----------\n+ name : str\n+ Name of the module.\n+ path : str\n+ Path to .py/.pyc file.\n+\n+ Returns\n+ -------\n+ mod : module\n+ Imported module.\n+ \"\"\"\n+ if sys.version_info < (3, 3):\n+ import imp\n+ if path.endswith('.pyc'):\n+ return imp.load_compiled(name, path)\n+ else:\n+ return imp.load_source(name, path)\n+ elif sys.version_info < (3, 5):\n+ if path.endswith('.pyc'):\n+ from importlib.machinery import SourcelessFileLoader\n+ return SourcelessFileLoader(name, path).load_module()\n+ else:\n+ from importlib.machinery import SourceFileLoader\n+ return SourceFileLoader(name, path).load_module()\n+ else: # Python 3.5 or greater\n+ from importlib.util import spec_from_file_location, module_from_spec\n+ spec = spec_from_file_location(name, path)\n+ mod = module_from_spec(spec)\n+ spec.loader.exec_module(mod)\n+ return mod\n+\n+\n def get_optparser(cmdpath, usage=None):\n \"\"\"Create OptionParser with cmd specific settings (e.g. prog value).\"\"\"\n command = os.path.basename(cmdpath)\n@@ -21,10 +57,7 @@\n command = command[4:-4]\n \n # Fetch description\n- if cmdpath.endswith('.pyc'):\n- mod = imp.load_compiled('__temp', cmdpath)\n- else:\n- mod = imp.load_source('__temp', cmdpath)\n+ mod = load_module('__temp', cmdpath)\n if mod.__doc__:\n doc, description, epilog = mod.__doc__, None, None\n", "issue": "Remove deprecated imp module\nCurrently, `mne/commands/utils.py` still uses the deprecated `imp` module, which has long been replaced with `importlib`. According to [this answer on SO](https://stackoverflow.com/a/67692/1112283), the current solution works only on Python 3.5/3.6, and there is a (deprecated) alternative for Python 3.3/3.4. All versions < 3.3 need to use `imp`.\r\n\r\nHow should this be handled in MNE?\n", "before_files": [{"content": "\"\"\"Some utility functions for commands (e.g. for cmdline handling).\"\"\"\n\n# Authors: Yaroslav Halchenko <debian@onerussian.com>\n#\n# License: BSD (3-clause)\n\nimport imp\nimport os\nimport re\nfrom optparse import OptionParser\n\nimport mne\n\n\ndef get_optparser(cmdpath, usage=None):\n \"\"\"Create OptionParser with cmd specific settings (e.g. prog value).\"\"\"\n command = os.path.basename(cmdpath)\n if re.match('mne_(.*).py', command):\n command = command[4:-3]\n elif re.match('mne_(.*).pyc', command):\n command = command[4:-4]\n\n # Fetch description\n if cmdpath.endswith('.pyc'):\n mod = imp.load_compiled('__temp', cmdpath)\n else:\n mod = imp.load_source('__temp', cmdpath)\n if mod.__doc__:\n doc, description, epilog = mod.__doc__, None, None\n\n doc_lines = doc.split('\\n')\n description = doc_lines[0]\n if len(doc_lines) > 1:\n epilog = '\\n'.join(doc_lines[1:])\n\n # monkey patch OptionParser to not wrap epilog\n OptionParser.format_epilog = lambda self, formatter: self.epilog\n parser = OptionParser(prog=\"mne %s\" % command,\n version=mne.__version__,\n description=description,\n epilog=epilog, usage=usage)\n\n return parser\n", "path": "mne/commands/utils.py"}]}
1065
512
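For readers following the imp-to-importlib change above, the core replacement on Python 3.5+ is the spec-based loading API. Below is a minimal standalone sketch; the helper name and file path are hypothetical, not MNE code.

```python
# Standalone sketch: import a module from an arbitrary .py file path using
# importlib instead of the deprecated imp.load_source().
from importlib.util import module_from_spec, spec_from_file_location


def load_module_from_path(name, path):
    """Return the module defined in the file at `path`, under the name `name`."""
    spec = spec_from_file_location(name, path)
    module = module_from_spec(spec)
    spec.loader.exec_module(module)  # actually executes the file's code
    return module


# Hypothetical usage, mirroring how a command's docstring would be fetched:
# mod = load_module_from_path("__temp", "/path/to/mne_browse_raw.py")
# print(mod.__doc__)
```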
gh_patches_debug_18694
rasdani/github-patches
git_diff
sublimelsp__LSP-1371
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Diagnostic not highlighted in view when range is empty **Describe the bug** A diagnostic that has an empty range is not visible in the view (only in the gutter and in diagnostics panel). **To Reproduce** Receive a diagnostic like: ```js { "diagnostics": [ { "code": { "target": "https://eslint.org/docs/rules/curly", "value": "curly" }, "message": "Expected { after 'if' condition.", "range": { "end": { "character": 9, "line": 607 }, "start": { "character": 9, "line": 607 } }, "severity": 1, "source": "eslint" }, ``` **Expected behavior** See where on the line the problem is. Otherwise, it's difficult to know where code action can be triggered. **Screenshots** ![Screenshot 2020-09-25 at 22 58 01](https://user-images.githubusercontent.com/153197/94315521-f6394080-ff82-11ea-9dc6-7adffb643465.png) **Environment (please complete the following information):** - OS: macOS - Sublime Text version: 4087 - LSP version: 49ea7330301078035ad56b94218a9c6edb604498 - Language servers used: eslint </issue> <code> [start of plugin/session_view.py] 1 from .core.protocol import Diagnostic 2 from .core.protocol import Request 3 from .core.sessions import Session 4 from .core.settings import userprefs 5 from .core.types import view2scope 6 from .core.typing import Any, Iterable, List, Tuple, Optional, Dict 7 from .core.views import DIAGNOSTIC_SEVERITY 8 from .core.windows import AbstractViewListener 9 from .session_buffer import SessionBuffer 10 from weakref import ref 11 from weakref import WeakValueDictionary 12 import sublime 13 14 15 class SessionView: 16 """ 17 Holds state per session per view. 18 """ 19 20 LANGUAGE_ID_KEY = "lsp_language" 21 SHOW_DEFINITIONS_KEY = "show_definitions" 22 HOVER_PROVIDER_KEY = "hoverProvider" 23 HOVER_PROVIDER_COUNT_KEY = "lsp_hover_provider_count" 24 25 _session_buffers = WeakValueDictionary() # type: WeakValueDictionary[Tuple[str, int], SessionBuffer] 26 27 def __init__(self, listener: AbstractViewListener, session: Session) -> None: 28 self.view = listener.view 29 self.session = session 30 self.active_requests = {} # type: Dict[int, Request] 31 settings = self.view.settings() 32 # TODO: Language ID must be UNIQUE! 33 languages = settings.get(self.LANGUAGE_ID_KEY) 34 self._language_id = '' 35 if not isinstance(languages, dict): 36 languages = {} 37 for language in session.config.languages: 38 if language.match_scope(view2scope(self.view)): 39 languages[session.config.name] = language.id 40 self._language_id = language.id 41 break 42 settings.set(self.LANGUAGE_ID_KEY, languages) 43 buffer_id = self.view.buffer_id() 44 key = (session.config.name, buffer_id) 45 session_buffer = self._session_buffers.get(key) 46 if session_buffer is None: 47 session_buffer = SessionBuffer(self, buffer_id, self._language_id) 48 self._session_buffers[key] = session_buffer 49 else: 50 session_buffer.add_session_view(self) 51 self.session_buffer = session_buffer 52 self.listener = ref(listener) 53 session.register_session_view_async(self) 54 session.config.set_view_status(self.view, "") 55 if self.session.has_capability(self.HOVER_PROVIDER_KEY): 56 self._increment_hover_count() 57 58 def __del__(self) -> None: 59 if self.session.has_capability(self.HOVER_PROVIDER_KEY): 60 self._decrement_hover_count() 61 # If the session is exiting then there's no point in sending textDocument/didClose and there's also no point 62 # in unregistering ourselves from the session. 
63 if not self.session.exiting: 64 self.session.unregister_session_view_async(self) 65 self.session.config.erase_view_status(self.view) 66 settings = self.view.settings() # type: sublime.Settings 67 # TODO: Language ID must be UNIQUE! 68 languages = settings.get(self.LANGUAGE_ID_KEY) 69 if isinstance(languages, dict): 70 languages.pop(self.session.config.name, None) 71 if languages: 72 settings.set(self.LANGUAGE_ID_KEY, languages) 73 else: 74 settings.erase(self.LANGUAGE_ID_KEY) 75 for severity in range(1, len(DIAGNOSTIC_SEVERITY) + 1): 76 self.view.erase_regions(self.diagnostics_key(severity)) 77 78 def _increment_hover_count(self) -> None: 79 settings = self.view.settings() 80 count = settings.get(self.HOVER_PROVIDER_COUNT_KEY, 0) 81 if isinstance(count, int): 82 count += 1 83 settings.set(self.HOVER_PROVIDER_COUNT_KEY, count) 84 settings.set(self.SHOW_DEFINITIONS_KEY, False) 85 86 def _decrement_hover_count(self) -> None: 87 settings = self.view.settings() 88 count = settings.get(self.HOVER_PROVIDER_COUNT_KEY) 89 if isinstance(count, int): 90 count -= 1 91 if count == 0: 92 settings.erase(self.HOVER_PROVIDER_COUNT_KEY) 93 settings.set(self.SHOW_DEFINITIONS_KEY, True) 94 95 def get_capability(self, capability_path: str) -> Optional[Any]: 96 return self.session_buffer.get_capability(capability_path) 97 98 def has_capability(self, capability_path: str) -> bool: 99 value = self.session_buffer.get_capability(capability_path) 100 return isinstance(value, dict) or bool(value) 101 102 def on_capability_added_async(self, capability_path: str, options: Dict[str, Any]) -> None: 103 if capability_path == self.HOVER_PROVIDER_KEY: 104 self._increment_hover_count() 105 106 def on_capability_removed_async(self, discarded: Dict[str, Any]) -> None: 107 if self.HOVER_PROVIDER_KEY in discarded: 108 self._decrement_hover_count() 109 110 def has_capability_async(self, capability_path: str) -> bool: 111 return self.session_buffer.has_capability(capability_path) 112 113 def shutdown_async(self) -> None: 114 listener = self.listener() 115 if listener: 116 listener.on_session_shutdown_async(self.session) 117 118 def diagnostics_key(self, severity: int) -> str: 119 return "lsp{}d{}".format(self.session.config.name, severity) 120 121 def present_diagnostics_async(self, flags: int) -> None: 122 data_per_severity = self.session_buffer.data_per_severity 123 for severity in reversed(range(1, len(DIAGNOSTIC_SEVERITY) + 1)): 124 key = self.diagnostics_key(severity) 125 data = data_per_severity.get(severity) 126 if data is None: 127 self.view.erase_regions(key) 128 elif ((severity <= userprefs().show_diagnostics_severity_level) and 129 (data.icon or flags != (sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE))): 130 self.view.add_regions(key, data.regions, data.scope, data.icon, flags) 131 else: 132 self.view.erase_regions(key) 133 listener = self.listener() 134 if listener: 135 listener.on_diagnostics_updated_async() 136 137 def get_diagnostics_async(self) -> List[Diagnostic]: 138 return self.session_buffer.diagnostics 139 140 def on_request_started_async(self, request_id: int, request: Request) -> None: 141 self.active_requests[request_id] = request 142 143 def on_request_finished_async(self, request_id: int) -> None: 144 self.active_requests.pop(request_id, None) 145 146 def on_text_changed_async(self, changes: Iterable[sublime.TextChange]) -> None: 147 self.session_buffer.on_text_changed_async(self.view, changes) 148 149 def on_revert_async(self) -> None: 150 self.session_buffer.on_revert_async(self.view) 151 152 def 
on_reload_async(self) -> None: 153 self.session_buffer.on_reload_async(self.view) 154 155 def purge_changes_async(self) -> None: 156 self.session_buffer.purge_changes_async(self.view) 157 158 def on_pre_save_async(self, old_file_name: str) -> None: 159 self.session_buffer.on_pre_save_async(self.view, old_file_name) 160 161 def on_post_save_async(self) -> None: 162 self.session_buffer.on_post_save_async(self.view) 163 164 def __str__(self) -> str: 165 return '{}:{}'.format(self.session.config.name, self.view.id()) 166 [end of plugin/session_view.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plugin/session_view.py b/plugin/session_view.py --- a/plugin/session_view.py +++ b/plugin/session_view.py @@ -127,7 +127,15 @@ self.view.erase_regions(key) elif ((severity <= userprefs().show_diagnostics_severity_level) and (data.icon or flags != (sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE))): - self.view.add_regions(key, data.regions, data.scope, data.icon, flags) + + def handle_same_regions(region: sublime.Region) -> sublime.Region: + # this allows showing diagnostics with same begin and end in the view + if region.a == region.b: + return sublime.Region(region.a, region.a + 1) + return region + + underline_regions = list(map(handle_same_regions, data.regions)) + self.view.add_regions(key, underline_regions, data.scope, data.icon, flags) else: self.view.erase_regions(key) listener = self.listener()
{"golden_diff": "diff --git a/plugin/session_view.py b/plugin/session_view.py\n--- a/plugin/session_view.py\n+++ b/plugin/session_view.py\n@@ -127,7 +127,15 @@\n self.view.erase_regions(key)\n elif ((severity <= userprefs().show_diagnostics_severity_level) and\n (data.icon or flags != (sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE))):\n- self.view.add_regions(key, data.regions, data.scope, data.icon, flags)\n+\n+ def handle_same_regions(region: sublime.Region) -> sublime.Region:\n+ # this allows showing diagnostics with same begin and end in the view\n+ if region.a == region.b:\n+ return sublime.Region(region.a, region.a + 1)\n+ return region\n+\n+ underline_regions = list(map(handle_same_regions, data.regions))\n+ self.view.add_regions(key, underline_regions, data.scope, data.icon, flags)\n else:\n self.view.erase_regions(key)\n listener = self.listener()\n", "issue": "Diagnostic not highlighted in view when range is empty\n**Describe the bug**\r\nA diagnostic that has an empty range is not visible in the view (only in the gutter and in diagnostics panel).\r\n\r\n**To Reproduce**\r\nReceive a diagnostic like:\r\n```js\r\n{\r\n \"diagnostics\": [\r\n {\r\n \"code\": {\r\n \"target\": \"https://eslint.org/docs/rules/curly\",\r\n \"value\": \"curly\"\r\n },\r\n \"message\": \"Expected { after 'if' condition.\",\r\n \"range\": {\r\n \"end\": {\r\n \"character\": 9,\r\n \"line\": 607\r\n },\r\n \"start\": {\r\n \"character\": 9,\r\n \"line\": 607\r\n }\r\n },\r\n \"severity\": 1,\r\n \"source\": \"eslint\"\r\n },\r\n```\r\n\r\n**Expected behavior**\r\nSee where on the line the problem is. Otherwise, it's difficult to know where code action can be triggered.\r\n\r\n**Screenshots**\r\n![Screenshot 2020-09-25 at 22 58 01](https://user-images.githubusercontent.com/153197/94315521-f6394080-ff82-11ea-9dc6-7adffb643465.png)\r\n\r\n\r\n**Environment (please complete the following information):**\r\n- OS: macOS\r\n- Sublime Text version: 4087\r\n- LSP version: 49ea7330301078035ad56b94218a9c6edb604498\r\n- Language servers used: eslint\n", "before_files": [{"content": "from .core.protocol import Diagnostic\nfrom .core.protocol import Request\nfrom .core.sessions import Session\nfrom .core.settings import userprefs\nfrom .core.types import view2scope\nfrom .core.typing import Any, Iterable, List, Tuple, Optional, Dict\nfrom .core.views import DIAGNOSTIC_SEVERITY\nfrom .core.windows import AbstractViewListener\nfrom .session_buffer import SessionBuffer\nfrom weakref import ref\nfrom weakref import WeakValueDictionary\nimport sublime\n\n\nclass SessionView:\n \"\"\"\n Holds state per session per view.\n \"\"\"\n\n LANGUAGE_ID_KEY = \"lsp_language\"\n SHOW_DEFINITIONS_KEY = \"show_definitions\"\n HOVER_PROVIDER_KEY = \"hoverProvider\"\n HOVER_PROVIDER_COUNT_KEY = \"lsp_hover_provider_count\"\n\n _session_buffers = WeakValueDictionary() # type: WeakValueDictionary[Tuple[str, int], SessionBuffer]\n\n def __init__(self, listener: AbstractViewListener, session: Session) -> None:\n self.view = listener.view\n self.session = session\n self.active_requests = {} # type: Dict[int, Request]\n settings = self.view.settings()\n # TODO: Language ID must be UNIQUE!\n languages = settings.get(self.LANGUAGE_ID_KEY)\n self._language_id = ''\n if not isinstance(languages, dict):\n languages = {}\n for language in session.config.languages:\n if language.match_scope(view2scope(self.view)):\n languages[session.config.name] = language.id\n self._language_id = language.id\n break\n settings.set(self.LANGUAGE_ID_KEY, languages)\n 
buffer_id = self.view.buffer_id()\n key = (session.config.name, buffer_id)\n session_buffer = self._session_buffers.get(key)\n if session_buffer is None:\n session_buffer = SessionBuffer(self, buffer_id, self._language_id)\n self._session_buffers[key] = session_buffer\n else:\n session_buffer.add_session_view(self)\n self.session_buffer = session_buffer\n self.listener = ref(listener)\n session.register_session_view_async(self)\n session.config.set_view_status(self.view, \"\")\n if self.session.has_capability(self.HOVER_PROVIDER_KEY):\n self._increment_hover_count()\n\n def __del__(self) -> None:\n if self.session.has_capability(self.HOVER_PROVIDER_KEY):\n self._decrement_hover_count()\n # If the session is exiting then there's no point in sending textDocument/didClose and there's also no point\n # in unregistering ourselves from the session.\n if not self.session.exiting:\n self.session.unregister_session_view_async(self)\n self.session.config.erase_view_status(self.view)\n settings = self.view.settings() # type: sublime.Settings\n # TODO: Language ID must be UNIQUE!\n languages = settings.get(self.LANGUAGE_ID_KEY)\n if isinstance(languages, dict):\n languages.pop(self.session.config.name, None)\n if languages:\n settings.set(self.LANGUAGE_ID_KEY, languages)\n else:\n settings.erase(self.LANGUAGE_ID_KEY)\n for severity in range(1, len(DIAGNOSTIC_SEVERITY) + 1):\n self.view.erase_regions(self.diagnostics_key(severity))\n\n def _increment_hover_count(self) -> None:\n settings = self.view.settings()\n count = settings.get(self.HOVER_PROVIDER_COUNT_KEY, 0)\n if isinstance(count, int):\n count += 1\n settings.set(self.HOVER_PROVIDER_COUNT_KEY, count)\n settings.set(self.SHOW_DEFINITIONS_KEY, False)\n\n def _decrement_hover_count(self) -> None:\n settings = self.view.settings()\n count = settings.get(self.HOVER_PROVIDER_COUNT_KEY)\n if isinstance(count, int):\n count -= 1\n if count == 0:\n settings.erase(self.HOVER_PROVIDER_COUNT_KEY)\n settings.set(self.SHOW_DEFINITIONS_KEY, True)\n\n def get_capability(self, capability_path: str) -> Optional[Any]:\n return self.session_buffer.get_capability(capability_path)\n\n def has_capability(self, capability_path: str) -> bool:\n value = self.session_buffer.get_capability(capability_path)\n return isinstance(value, dict) or bool(value)\n\n def on_capability_added_async(self, capability_path: str, options: Dict[str, Any]) -> None:\n if capability_path == self.HOVER_PROVIDER_KEY:\n self._increment_hover_count()\n\n def on_capability_removed_async(self, discarded: Dict[str, Any]) -> None:\n if self.HOVER_PROVIDER_KEY in discarded:\n self._decrement_hover_count()\n\n def has_capability_async(self, capability_path: str) -> bool:\n return self.session_buffer.has_capability(capability_path)\n\n def shutdown_async(self) -> None:\n listener = self.listener()\n if listener:\n listener.on_session_shutdown_async(self.session)\n\n def diagnostics_key(self, severity: int) -> str:\n return \"lsp{}d{}\".format(self.session.config.name, severity)\n\n def present_diagnostics_async(self, flags: int) -> None:\n data_per_severity = self.session_buffer.data_per_severity\n for severity in reversed(range(1, len(DIAGNOSTIC_SEVERITY) + 1)):\n key = self.diagnostics_key(severity)\n data = data_per_severity.get(severity)\n if data is None:\n self.view.erase_regions(key)\n elif ((severity <= userprefs().show_diagnostics_severity_level) and\n (data.icon or flags != (sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE))):\n self.view.add_regions(key, data.regions, data.scope, data.icon, 
flags)\n else:\n self.view.erase_regions(key)\n listener = self.listener()\n if listener:\n listener.on_diagnostics_updated_async()\n\n def get_diagnostics_async(self) -> List[Diagnostic]:\n return self.session_buffer.diagnostics\n\n def on_request_started_async(self, request_id: int, request: Request) -> None:\n self.active_requests[request_id] = request\n\n def on_request_finished_async(self, request_id: int) -> None:\n self.active_requests.pop(request_id, None)\n\n def on_text_changed_async(self, changes: Iterable[sublime.TextChange]) -> None:\n self.session_buffer.on_text_changed_async(self.view, changes)\n\n def on_revert_async(self) -> None:\n self.session_buffer.on_revert_async(self.view)\n\n def on_reload_async(self) -> None:\n self.session_buffer.on_reload_async(self.view)\n\n def purge_changes_async(self) -> None:\n self.session_buffer.purge_changes_async(self.view)\n\n def on_pre_save_async(self, old_file_name: str) -> None:\n self.session_buffer.on_pre_save_async(self.view, old_file_name)\n\n def on_post_save_async(self) -> None:\n self.session_buffer.on_post_save_async(self.view)\n\n def __str__(self) -> str:\n return '{}:{}'.format(self.session.config.name, self.view.id())\n", "path": "plugin/session_view.py"}]}
2,774
220
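The session_view.py fix recorded in the verification block above makes empty-range diagnostics visible by widening every zero-width region to one character before handing it to `add_regions`. A minimal sketch of the same idea outside Sublime, with plain `(begin, end)` tuples standing in for `sublime.Region` objects, is:

```python
# Illustrative stand-in: (begin, end) tuples instead of sublime.Region.
def widen_empty_regions(regions):
    """Expand zero-width regions to one character so the underline is drawn."""
    widened = []
    for begin, end in regions:
        if begin == end:
            widened.append((begin, begin + 1))  # same trick as handle_same_regions()
        else:
            widened.append((begin, end))
    return widened

print(widen_empty_regions([(9, 9), (3, 7)]))  # -> [(9, 10), (3, 7)]
```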
gh_patches_debug_18667
rasdani/github-patches
git_diff
Bitmessage__PyBitmessage-1413
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Duplicate connections to some onion peers The description is based on messages from `[chan] bitmessage` but I've seen it myself a couple of weeks ago (I thought it's related to my #1394 - wrongly). ![image](https://user-images.githubusercontent.com/4012700/49939372-56add980-fee5-11e8-8f6c-de2f83123ebd.png) Changes proposed today: ```patch diff --git a/src/network/connectionpool.py b/src/network/connectionpool.py index e599cdf..c5ba701 100644 --- a/src/network/connectionpool.py +++ b/src/network/connectionpool.py @@ -93,7 +93,7 @@ class BMConnectionPool(object): del self.inboundConnections[connection.destination.host] except KeyError: pass - connection.close() + connection.handle_close() def getListeningIP(self): if BMConfigParser().safeGet("bitmessagesettings", "onionhostname").endswith(".onion"): ``` </issue> <code> [start of src/network/connectionpool.py] 1 from ConfigParser import NoOptionError, NoSectionError 2 import errno 3 import socket 4 import time 5 import random 6 import re 7 8 from bmconfigparser import BMConfigParser 9 from debug import logger 10 import helper_bootstrap 11 import knownnodes 12 from network.proxy import Proxy 13 from network.tcp import TCPServer, Socks5BMConnection, Socks4aBMConnection, TCPConnection 14 from network.udp import UDPSocket 15 from network.connectionchooser import chooseConnection 16 import network.asyncore_pollchoose as asyncore 17 import protocol 18 from singleton import Singleton 19 import state 20 import helper_random 21 22 23 @Singleton 24 class BMConnectionPool(object): 25 def __init__(self): 26 asyncore.set_rates( 27 BMConfigParser().safeGetInt("bitmessagesettings", "maxdownloadrate"), 28 BMConfigParser().safeGetInt("bitmessagesettings", "maxuploadrate")) 29 self.outboundConnections = {} 30 self.inboundConnections = {} 31 self.listeningSockets = {} 32 self.udpSockets = {} 33 self.streams = [] 34 self.lastSpawned = 0 35 self.spawnWait = 2 36 self.bootstrapped = False 37 38 def connectToStream(self, streamNumber): 39 self.streams.append(streamNumber) 40 41 def getConnectionByAddr(self, addr): 42 if addr in self.inboundConnections: 43 return self.inboundConnections[addr] 44 try: 45 if addr.host in self.inboundConnections: 46 return self.inboundConnections[addr.host] 47 except AttributeError: 48 pass 49 if addr in self.outboundConnections: 50 return self.outboundConnections[addr] 51 try: 52 if addr.host in self.udpSockets: 53 return self.udpSockets[addr.host] 54 except AttributeError: 55 pass 56 raise KeyError 57 58 def isAlreadyConnected(self, nodeid): 59 for i in self.inboundConnections.values() + self.outboundConnections.values(): 60 try: 61 if nodeid == i.nodeid: 62 return True 63 except AttributeError: 64 pass 65 return False 66 67 def addConnection(self, connection): 68 if isinstance(connection, UDPSocket): 69 return 70 if connection.isOutbound: 71 self.outboundConnections[connection.destination] = connection 72 else: 73 if connection.destination.host in self.inboundConnections: 74 self.inboundConnections[connection.destination] = connection 75 else: 76 self.inboundConnections[connection.destination.host] = connection 77 78 def removeConnection(self, connection): 79 if isinstance(connection, UDPSocket): 80 del self.udpSockets[connection.listening.host] 81 elif isinstance(connection, TCPServer): 82 del self.listeningSockets[state.Peer(connection.destination.host, connection.destination.port)] 83 elif connection.isOutbound: 84 try: 
85 del self.outboundConnections[connection.destination] 86 except KeyError: 87 pass 88 else: 89 try: 90 del self.inboundConnections[connection.destination] 91 except KeyError: 92 try: 93 del self.inboundConnections[connection.destination.host] 94 except KeyError: 95 pass 96 connection.close() 97 98 def getListeningIP(self): 99 if BMConfigParser().safeGet("bitmessagesettings", "onionhostname").endswith(".onion"): 100 host = BMConfigParser().safeGet("bitmessagesettings", "onionbindip") 101 else: 102 host = '127.0.0.1' 103 if BMConfigParser().safeGetBoolean("bitmessagesettings", "sockslisten") or \ 104 BMConfigParser().get("bitmessagesettings", "socksproxytype") == "none": 105 # python doesn't like bind + INADDR_ANY? 106 #host = socket.INADDR_ANY 107 host = BMConfigParser().get("network", "bind") 108 return host 109 110 def startListening(self, bind=None): 111 if bind is None: 112 bind = self.getListeningIP() 113 port = BMConfigParser().safeGetInt("bitmessagesettings", "port") 114 # correct port even if it changed 115 ls = TCPServer(host=bind, port=port) 116 self.listeningSockets[ls.destination] = ls 117 118 def startUDPSocket(self, bind=None): 119 if bind is None: 120 host = self.getListeningIP() 121 udpSocket = UDPSocket(host=host, announcing=True) 122 else: 123 if bind is False: 124 udpSocket = UDPSocket(announcing=False) 125 else: 126 udpSocket = UDPSocket(host=bind, announcing=True) 127 self.udpSockets[udpSocket.listening.host] = udpSocket 128 129 def loop(self): 130 # defaults to empty loop if outbound connections are maxed 131 spawnConnections = False 132 acceptConnections = True 133 if BMConfigParser().safeGetBoolean('bitmessagesettings', 'dontconnect'): 134 acceptConnections = False 135 elif BMConfigParser().safeGetBoolean('bitmessagesettings', 'sendoutgoingconnections'): 136 spawnConnections = True 137 if BMConfigParser().get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS' and \ 138 (not BMConfigParser().getboolean('bitmessagesettings', 'sockslisten') and \ 139 ".onion" not in BMConfigParser().get('bitmessagesettings', 'onionhostname')): 140 acceptConnections = False 141 142 if spawnConnections: 143 if not knownnodes.knownNodesActual: 144 helper_bootstrap.dns() 145 if not self.bootstrapped: 146 self.bootstrapped = True 147 Proxy.proxy = (BMConfigParser().safeGet("bitmessagesettings", "sockshostname"), 148 BMConfigParser().safeGetInt("bitmessagesettings", "socksport")) 149 # TODO AUTH 150 # TODO reset based on GUI settings changes 151 try: 152 if not BMConfigParser().get("network", "onionsocksproxytype").startswith("SOCKS"): 153 raise NoOptionError 154 Proxy.onionproxy = (BMConfigParser().get("network", "onionsockshostname"), 155 BMConfigParser().getint("network", "onionsocksport")) 156 except (NoOptionError, NoSectionError): 157 Proxy.onionproxy = None 158 established = sum(1 for c in self.outboundConnections.values() if (c.connected and c.fullyEstablished)) 159 pending = len(self.outboundConnections) - established 160 if established < BMConfigParser().safeGetInt("bitmessagesettings", "maxoutboundconnections"): 161 for i in range(state.maximumNumberOfHalfOpenConnections - pending): 162 try: 163 chosen = chooseConnection(helper_random.randomchoice(self.streams)) 164 except ValueError: 165 continue 166 if chosen in self.outboundConnections: 167 continue 168 if chosen.host in self.inboundConnections: 169 continue 170 # don't connect to self 171 if chosen in state.ownAddresses: 172 continue 173 174 #for c in self.outboundConnections: 175 # if chosen == c.destination: 176 # 
continue 177 #for c in self.inboundConnections: 178 # if chosen.host == c.destination.host: 179 # continue 180 try: 181 if chosen.host.endswith(".onion") and Proxy.onionproxy is not None: 182 if BMConfigParser().get("network", "onionsocksproxytype") == "SOCKS5": 183 self.addConnection(Socks5BMConnection(chosen)) 184 elif BMConfigParser().get("network", "onionsocksproxytype") == "SOCKS4a": 185 self.addConnection(Socks4aBMConnection(chosen)) 186 elif BMConfigParser().safeGet("bitmessagesettings", "socksproxytype") == "SOCKS5": 187 self.addConnection(Socks5BMConnection(chosen)) 188 elif BMConfigParser().safeGet("bitmessagesettings", "socksproxytype") == "SOCKS4a": 189 self.addConnection(Socks4aBMConnection(chosen)) 190 else: 191 self.addConnection(TCPConnection(chosen)) 192 except socket.error as e: 193 if e.errno == errno.ENETUNREACH: 194 continue 195 except (NoSectionError, NoOptionError): 196 # shouldn't happen 197 pass 198 199 self.lastSpawned = time.time() 200 else: 201 for i in ( 202 self.inboundConnections.values() + 203 self.outboundConnections.values() 204 ): 205 i.set_state("close") 206 # FIXME: rating will be increased after next connection 207 i.handle_close() 208 209 if acceptConnections: 210 if not self.listeningSockets: 211 if BMConfigParser().safeGet("network", "bind") == '': 212 self.startListening() 213 else: 214 for bind in re.sub("[^\w.]+", " ", BMConfigParser().safeGet("network", "bind")).split(): 215 self.startListening(bind) 216 logger.info('Listening for incoming connections.') 217 if not self.udpSockets: 218 if BMConfigParser().safeGet("network", "bind") == '': 219 self.startUDPSocket() 220 else: 221 for bind in re.sub("[^\w.]+", " ", BMConfigParser().safeGet("network", "bind")).split(): 222 self.startUDPSocket(bind) 223 self.startUDPSocket(False) 224 logger.info('Starting UDP socket(s).') 225 else: 226 if self.listeningSockets: 227 for i in self.listeningSockets.values(): 228 i.close_reason = "Stopping listening" 229 i.accepting = i.connecting = i.connected = False 230 logger.info('Stopped listening for incoming connections.') 231 if self.udpSockets: 232 for i in self.udpSockets.values(): 233 i.close_reason = "Stopping UDP socket" 234 i.accepting = i.connecting = i.connected = False 235 logger.info('Stopped udp sockets.') 236 237 loopTime = float(self.spawnWait) 238 if self.lastSpawned < time.time() - self.spawnWait: 239 loopTime = 2.0 240 asyncore.loop(timeout=loopTime, count=1000) 241 242 reaper = [] 243 for i in self.inboundConnections.values() + self.outboundConnections.values(): 244 minTx = time.time() - 20 245 if i.fullyEstablished: 246 minTx -= 300 - 20 247 if i.lastTx < minTx: 248 if i.fullyEstablished: 249 i.append_write_buf(protocol.CreatePacket('ping')) 250 else: 251 i.close_reason = "Timeout (%is)" % (time.time() - i.lastTx) 252 i.set_state("close") 253 for i in self.inboundConnections.values() + self.outboundConnections.values() + self.listeningSockets.values() + self.udpSockets.values(): 254 if not (i.accepting or i.connecting or i.connected): 255 reaper.append(i) 256 else: 257 try: 258 if i.state == "close": 259 reaper.append(i) 260 except AttributeError: 261 pass 262 for i in reaper: 263 self.removeConnection(i) 264 [end of src/network/connectionpool.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/network/connectionpool.py b/src/network/connectionpool.py --- a/src/network/connectionpool.py +++ b/src/network/connectionpool.py @@ -93,7 +93,7 @@ del self.inboundConnections[connection.destination.host] except KeyError: pass - connection.close() + connection.handle_close() def getListeningIP(self): if BMConfigParser().safeGet("bitmessagesettings", "onionhostname").endswith(".onion"): @@ -202,7 +202,6 @@ self.inboundConnections.values() + self.outboundConnections.values() ): - i.set_state("close") # FIXME: rating will be increased after next connection i.handle_close()
{"golden_diff": "diff --git a/src/network/connectionpool.py b/src/network/connectionpool.py\n--- a/src/network/connectionpool.py\n+++ b/src/network/connectionpool.py\n@@ -93,7 +93,7 @@\n del self.inboundConnections[connection.destination.host]\n except KeyError:\n pass\n- connection.close()\n+ connection.handle_close()\n \n def getListeningIP(self):\n if BMConfigParser().safeGet(\"bitmessagesettings\", \"onionhostname\").endswith(\".onion\"):\n@@ -202,7 +202,6 @@\n self.inboundConnections.values() +\n self.outboundConnections.values()\n ):\n- i.set_state(\"close\")\n # FIXME: rating will be increased after next connection\n i.handle_close()\n", "issue": "Duplicate connections to some onion peers\nThe description is based on messages from `[chan] bitmessage` but I've seen it myself a couple of weeks ago (I thought it's related to my #1394 - wrongly).\r\n\r\n![image](https://user-images.githubusercontent.com/4012700/49939372-56add980-fee5-11e8-8f6c-de2f83123ebd.png)\r\n\r\nChanges proposed today:\r\n```patch\r\ndiff --git a/src/network/connectionpool.py b/src/network/connectionpool.py\r\nindex e599cdf..c5ba701 100644\r\n--- a/src/network/connectionpool.py\r\n+++ b/src/network/connectionpool.py\r\n@@ -93,7 +93,7 @@ class BMConnectionPool(object):\r\n del self.inboundConnections[connection.destination.host]\r\n except KeyError:\r\n pass\r\n- connection.close()\r\n+ connection.handle_close()\r\n \r\n def getListeningIP(self):\r\n if BMConfigParser().safeGet(\"bitmessagesettings\", \"onionhostname\").endswith(\".onion\"):\r\n\r\n```\n", "before_files": [{"content": "from ConfigParser import NoOptionError, NoSectionError\nimport errno\nimport socket\nimport time\nimport random\nimport re\n\nfrom bmconfigparser import BMConfigParser\nfrom debug import logger\nimport helper_bootstrap\nimport knownnodes\nfrom network.proxy import Proxy\nfrom network.tcp import TCPServer, Socks5BMConnection, Socks4aBMConnection, TCPConnection\nfrom network.udp import UDPSocket\nfrom network.connectionchooser import chooseConnection\nimport network.asyncore_pollchoose as asyncore\nimport protocol\nfrom singleton import Singleton\nimport state\nimport helper_random\n\n\n@Singleton\nclass BMConnectionPool(object):\n def __init__(self):\n asyncore.set_rates(\n BMConfigParser().safeGetInt(\"bitmessagesettings\", \"maxdownloadrate\"),\n BMConfigParser().safeGetInt(\"bitmessagesettings\", \"maxuploadrate\"))\n self.outboundConnections = {}\n self.inboundConnections = {}\n self.listeningSockets = {}\n self.udpSockets = {}\n self.streams = []\n self.lastSpawned = 0\n self.spawnWait = 2 \n self.bootstrapped = False\n\n def connectToStream(self, streamNumber):\n self.streams.append(streamNumber)\n\n def getConnectionByAddr(self, addr):\n if addr in self.inboundConnections:\n return self.inboundConnections[addr]\n try:\n if addr.host in self.inboundConnections:\n return self.inboundConnections[addr.host]\n except AttributeError:\n pass\n if addr in self.outboundConnections:\n return self.outboundConnections[addr]\n try:\n if addr.host in self.udpSockets:\n return self.udpSockets[addr.host]\n except AttributeError:\n pass\n raise KeyError\n\n def isAlreadyConnected(self, nodeid):\n for i in self.inboundConnections.values() + self.outboundConnections.values():\n try:\n if nodeid == i.nodeid:\n return True\n except AttributeError:\n pass\n return False\n\n def addConnection(self, connection):\n if isinstance(connection, UDPSocket):\n return\n if connection.isOutbound:\n self.outboundConnections[connection.destination] = 
connection\n else:\n if connection.destination.host in self.inboundConnections:\n self.inboundConnections[connection.destination] = connection\n else:\n self.inboundConnections[connection.destination.host] = connection\n\n def removeConnection(self, connection):\n if isinstance(connection, UDPSocket):\n del self.udpSockets[connection.listening.host]\n elif isinstance(connection, TCPServer):\n del self.listeningSockets[state.Peer(connection.destination.host, connection.destination.port)]\n elif connection.isOutbound:\n try:\n del self.outboundConnections[connection.destination]\n except KeyError:\n pass\n else:\n try:\n del self.inboundConnections[connection.destination]\n except KeyError:\n try:\n del self.inboundConnections[connection.destination.host]\n except KeyError:\n pass\n connection.close()\n\n def getListeningIP(self):\n if BMConfigParser().safeGet(\"bitmessagesettings\", \"onionhostname\").endswith(\".onion\"):\n host = BMConfigParser().safeGet(\"bitmessagesettings\", \"onionbindip\")\n else:\n host = '127.0.0.1'\n if BMConfigParser().safeGetBoolean(\"bitmessagesettings\", \"sockslisten\") or \\\n BMConfigParser().get(\"bitmessagesettings\", \"socksproxytype\") == \"none\":\n # python doesn't like bind + INADDR_ANY?\n #host = socket.INADDR_ANY\n host = BMConfigParser().get(\"network\", \"bind\")\n return host\n\n def startListening(self, bind=None):\n if bind is None:\n bind = self.getListeningIP()\n port = BMConfigParser().safeGetInt(\"bitmessagesettings\", \"port\")\n # correct port even if it changed\n ls = TCPServer(host=bind, port=port)\n self.listeningSockets[ls.destination] = ls\n\n def startUDPSocket(self, bind=None):\n if bind is None:\n host = self.getListeningIP()\n udpSocket = UDPSocket(host=host, announcing=True)\n else:\n if bind is False:\n udpSocket = UDPSocket(announcing=False)\n else:\n udpSocket = UDPSocket(host=bind, announcing=True)\n self.udpSockets[udpSocket.listening.host] = udpSocket\n\n def loop(self):\n # defaults to empty loop if outbound connections are maxed\n spawnConnections = False\n acceptConnections = True\n if BMConfigParser().safeGetBoolean('bitmessagesettings', 'dontconnect'):\n acceptConnections = False\n elif BMConfigParser().safeGetBoolean('bitmessagesettings', 'sendoutgoingconnections'):\n spawnConnections = True\n if BMConfigParser().get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS' and \\\n (not BMConfigParser().getboolean('bitmessagesettings', 'sockslisten') and \\\n \".onion\" not in BMConfigParser().get('bitmessagesettings', 'onionhostname')):\n acceptConnections = False\n\n if spawnConnections:\n if not knownnodes.knownNodesActual:\n helper_bootstrap.dns()\n if not self.bootstrapped:\n self.bootstrapped = True\n Proxy.proxy = (BMConfigParser().safeGet(\"bitmessagesettings\", \"sockshostname\"),\n BMConfigParser().safeGetInt(\"bitmessagesettings\", \"socksport\"))\n # TODO AUTH\n # TODO reset based on GUI settings changes\n try:\n if not BMConfigParser().get(\"network\", \"onionsocksproxytype\").startswith(\"SOCKS\"):\n raise NoOptionError\n Proxy.onionproxy = (BMConfigParser().get(\"network\", \"onionsockshostname\"),\n BMConfigParser().getint(\"network\", \"onionsocksport\"))\n except (NoOptionError, NoSectionError):\n Proxy.onionproxy = None\n established = sum(1 for c in self.outboundConnections.values() if (c.connected and c.fullyEstablished))\n pending = len(self.outboundConnections) - established\n if established < BMConfigParser().safeGetInt(\"bitmessagesettings\", \"maxoutboundconnections\"):\n for i in 
range(state.maximumNumberOfHalfOpenConnections - pending):\n try:\n chosen = chooseConnection(helper_random.randomchoice(self.streams))\n except ValueError:\n continue\n if chosen in self.outboundConnections:\n continue\n if chosen.host in self.inboundConnections:\n continue\n # don't connect to self\n if chosen in state.ownAddresses:\n continue\n \n #for c in self.outboundConnections:\n # if chosen == c.destination:\n # continue\n #for c in self.inboundConnections:\n # if chosen.host == c.destination.host:\n # continue\n try:\n if chosen.host.endswith(\".onion\") and Proxy.onionproxy is not None:\n if BMConfigParser().get(\"network\", \"onionsocksproxytype\") == \"SOCKS5\":\n self.addConnection(Socks5BMConnection(chosen))\n elif BMConfigParser().get(\"network\", \"onionsocksproxytype\") == \"SOCKS4a\":\n self.addConnection(Socks4aBMConnection(chosen))\n elif BMConfigParser().safeGet(\"bitmessagesettings\", \"socksproxytype\") == \"SOCKS5\":\n self.addConnection(Socks5BMConnection(chosen))\n elif BMConfigParser().safeGet(\"bitmessagesettings\", \"socksproxytype\") == \"SOCKS4a\":\n self.addConnection(Socks4aBMConnection(chosen))\n else:\n self.addConnection(TCPConnection(chosen))\n except socket.error as e:\n if e.errno == errno.ENETUNREACH:\n continue\n except (NoSectionError, NoOptionError):\n # shouldn't happen\n pass\n\n self.lastSpawned = time.time()\n else:\n for i in (\n self.inboundConnections.values() +\n self.outboundConnections.values()\n ):\n i.set_state(\"close\")\n # FIXME: rating will be increased after next connection\n i.handle_close()\n\n if acceptConnections:\n if not self.listeningSockets:\n if BMConfigParser().safeGet(\"network\", \"bind\") == '':\n self.startListening()\n else:\n for bind in re.sub(\"[^\\w.]+\", \" \", BMConfigParser().safeGet(\"network\", \"bind\")).split():\n self.startListening(bind)\n logger.info('Listening for incoming connections.')\n if not self.udpSockets:\n if BMConfigParser().safeGet(\"network\", \"bind\") == '':\n self.startUDPSocket()\n else:\n for bind in re.sub(\"[^\\w.]+\", \" \", BMConfigParser().safeGet(\"network\", \"bind\")).split():\n self.startUDPSocket(bind)\n self.startUDPSocket(False)\n logger.info('Starting UDP socket(s).')\n else:\n if self.listeningSockets:\n for i in self.listeningSockets.values():\n i.close_reason = \"Stopping listening\"\n i.accepting = i.connecting = i.connected = False\n logger.info('Stopped listening for incoming connections.')\n if self.udpSockets:\n for i in self.udpSockets.values():\n i.close_reason = \"Stopping UDP socket\"\n i.accepting = i.connecting = i.connected = False\n logger.info('Stopped udp sockets.')\n\n loopTime = float(self.spawnWait)\n if self.lastSpawned < time.time() - self.spawnWait:\n loopTime = 2.0\n asyncore.loop(timeout=loopTime, count=1000)\n\n reaper = []\n for i in self.inboundConnections.values() + self.outboundConnections.values():\n minTx = time.time() - 20\n if i.fullyEstablished:\n minTx -= 300 - 20\n if i.lastTx < minTx:\n if i.fullyEstablished:\n i.append_write_buf(protocol.CreatePacket('ping'))\n else:\n i.close_reason = \"Timeout (%is)\" % (time.time() - i.lastTx) \n i.set_state(\"close\")\n for i in self.inboundConnections.values() + self.outboundConnections.values() + self.listeningSockets.values() + self.udpSockets.values():\n if not (i.accepting or i.connecting or i.connected):\n reaper.append(i)\n else:\n try:\n if i.state == \"close\":\n reaper.append(i)\n except AttributeError:\n pass\n for i in reaper:\n self.removeConnection(i)\n", "path": 
"src/network/connectionpool.py"}]}
3,789
161
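The connectionpool.py change replaces `connection.close()` with `connection.handle_close()` and drops the now-redundant `i.set_state("close")` in the shutdown loop, so teardown always goes through the dispatcher's cleanup hook rather than only releasing the socket; the duplicate-onion-connection report suggests that skipping that hook was leaving stale connection state behind. A toy sketch of the pattern, with hypothetical class and method bodies rather than the real asyncore-based ones:

```python
class ToyConnection:
    """Hypothetical stand-in for an asyncore-style connection object."""

    def __init__(self, destination):
        self.destination = destination
        self.open = True

    def close(self):
        # Low level: only releases the socket.
        self.open = False

    def handle_close(self):
        # High level: bookkeeping first, then close; callers should prefer this.
        print("removing", self.destination, "from connection tables")
        self.close()


ToyConnection("example1234567890.onion:8444").handle_close()
```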
gh_patches_debug_48679
rasdani/github-patches
git_diff
ethereum__web3.py-2659
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> protobuf dependency compatibility * Python: 3.5 * OS: osx * `import web3` output ``` ContextualVersionConflict ``` ### What was wrong? [protobuf](https://github.com/ethereum/web3.py/pull/1493) compatibility needs updating. Needed to downgrade protobuf to get it working. Version currently needs to be >4 but protobuf's latest version is 4.21.6 ### How can it be fixed? The newest version of protobuf should be compatible https://pypi.org/project/protobuf/ </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 from setuptools import ( 3 find_packages, 4 setup, 5 ) 6 7 extras_require = { 8 "tester": [ 9 "eth-tester[py-evm]==v0.6.0-beta.6", 10 "py-geth>=3.9.1,<4", 11 ], 12 "linter": [ 13 "flake8==3.8.3", 14 "isort>=4.2.15,<4.3.5", 15 "mypy==0.910", 16 "types-setuptools>=57.4.4,<58", 17 "types-requests>=2.26.1,<3", 18 "types-protobuf==3.19.13", 19 ], 20 "docs": [ 21 "mock", 22 "sphinx-better-theme>=0.1.4", 23 "click>=5.1", 24 "configparser==3.5.0", 25 "contextlib2>=0.5.4", 26 "py-geth>=3.9.1,<4", 27 "py-solc>=0.4.0", 28 "pytest>=4.4.0,<5.0.0", 29 "sphinx>=3.0,<4", 30 "sphinx_rtd_theme>=0.1.9", 31 "toposort>=1.4", 32 "towncrier==18.5.0", 33 "urllib3", 34 "wheel", 35 "Jinja2<=3.0.3", # Jinja v3.1.0 dropped support for python 3.6 36 ], 37 "dev": [ 38 "bumpversion", 39 "flaky>=3.7.0,<4", 40 "hypothesis>=3.31.2,<6", 41 "pytest>=4.4.0,<5.0.0", 42 "pytest-asyncio>=0.10.0,<0.11", 43 "pytest-mock>=1.10,<2", 44 "pytest-pythonpath>=0.3", 45 "pytest-watch>=4.2,<5", 46 "pytest-xdist>=1.29,<2", 47 "setuptools>=38.6.0", 48 "tox>=1.8.0", 49 "tqdm>4.32,<5", 50 "twine>=1.13,<2", 51 "pluggy==0.13.1", 52 "when-changed>=0.3.0,<0.4", 53 ], 54 } 55 56 extras_require["dev"] = ( 57 extras_require["tester"] 58 + extras_require["linter"] 59 + extras_require["docs"] 60 + extras_require["dev"] 61 ) 62 63 with open("./README.md") as readme: 64 long_description = readme.read() 65 66 setup( 67 name="web3", 68 # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility. 69 version="5.31.0", 70 description="""Web3.py""", 71 long_description_content_type="text/markdown", 72 long_description=long_description, 73 author="Piper Merriam", 74 author_email="pipermerriam@gmail.com", 75 url="https://github.com/ethereum/web3.py", 76 include_package_data=True, 77 install_requires=[ 78 "aiohttp>=3.7.4.post0,<4", 79 "eth-abi>=2.0.0b6,<3.0.0", 80 "eth-account>=0.5.9,<0.6.0", 81 "eth-hash[pycryptodome]>=0.2.0,<1.0.0", 82 # eth-account allows too broad of an eth-rlp dependency. 
83 # This eth-rlp pin can be removed once it gets tightened up in eth-account 84 "eth-rlp<0.3", 85 "eth-typing>=2.0.0,<3.0.0", 86 "eth-utils>=1.9.5,<2.0.0", 87 "hexbytes>=0.1.0,<1.0.0", 88 "ipfshttpclient==0.8.0a2", 89 "jsonschema>=3.2.0,<5", 90 "lru-dict>=1.1.6,<2.0.0", 91 "protobuf>=3.10.0,<4", 92 "pywin32>=223;platform_system=='Windows'", 93 "requests>=2.16.0,<3.0.0", 94 # remove typing_extensions after python_requires>=3.8, see web3._utils.compat 95 "typing-extensions>=3.7.4.1,<5;python_version<'3.8'", 96 "websockets>=9.1,<10", 97 ], 98 python_requires=">=3.6,<4", 99 extras_require=extras_require, 100 py_modules=["web3", "ens", "ethpm"], 101 entry_points={"pytest11": ["pytest_ethereum = web3.tools.pytest_ethereum.plugins"]}, 102 license="MIT", 103 zip_safe=False, 104 keywords="ethereum", 105 packages=find_packages(exclude=["tests", "tests.*"]), 106 package_data={"web3": ["py.typed"]}, 107 classifiers=[ 108 "Development Status :: 5 - Production/Stable", 109 "Intended Audience :: Developers", 110 "License :: OSI Approved :: MIT License", 111 "Natural Language :: English", 112 "Programming Language :: Python :: 3", 113 "Programming Language :: Python :: 3.6", 114 "Programming Language :: Python :: 3.7", 115 "Programming Language :: Python :: 3.8", 116 "Programming Language :: Python :: 3.9", 117 ], 118 ) 119 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -88,7 +88,7 @@ "ipfshttpclient==0.8.0a2", "jsonschema>=3.2.0,<5", "lru-dict>=1.1.6,<2.0.0", - "protobuf>=3.10.0,<4", + "protobuf==3.19.4", "pywin32>=223;platform_system=='Windows'", "requests>=2.16.0,<3.0.0", # remove typing_extensions after python_requires>=3.8, see web3._utils.compat
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -88,7 +88,7 @@\n \"ipfshttpclient==0.8.0a2\",\n \"jsonschema>=3.2.0,<5\",\n \"lru-dict>=1.1.6,<2.0.0\",\n- \"protobuf>=3.10.0,<4\",\n+ \"protobuf==3.19.4\",\n \"pywin32>=223;platform_system=='Windows'\",\n \"requests>=2.16.0,<3.0.0\",\n # remove typing_extensions after python_requires>=3.8, see web3._utils.compat\n", "issue": "protobuf dependency compatibility\n* Python: 3.5\r\n* OS: osx\r\n* `import web3` output\r\n\r\n```\r\nContextualVersionConflict\r\n```\r\n\r\n### What was wrong?\r\n\r\n[protobuf](https://github.com/ethereum/web3.py/pull/1493) compatibility needs updating. Needed to downgrade protobuf to get it working. Version currently needs to be >4 but protobuf's latest version is 4.21.6\r\n\r\n### How can it be fixed?\r\n\r\nThe newest version of protobuf should be compatible https://pypi.org/project/protobuf/\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom setuptools import (\n find_packages,\n setup,\n)\n\nextras_require = {\n \"tester\": [\n \"eth-tester[py-evm]==v0.6.0-beta.6\",\n \"py-geth>=3.9.1,<4\",\n ],\n \"linter\": [\n \"flake8==3.8.3\",\n \"isort>=4.2.15,<4.3.5\",\n \"mypy==0.910\",\n \"types-setuptools>=57.4.4,<58\",\n \"types-requests>=2.26.1,<3\",\n \"types-protobuf==3.19.13\",\n ],\n \"docs\": [\n \"mock\",\n \"sphinx-better-theme>=0.1.4\",\n \"click>=5.1\",\n \"configparser==3.5.0\",\n \"contextlib2>=0.5.4\",\n \"py-geth>=3.9.1,<4\",\n \"py-solc>=0.4.0\",\n \"pytest>=4.4.0,<5.0.0\",\n \"sphinx>=3.0,<4\",\n \"sphinx_rtd_theme>=0.1.9\",\n \"toposort>=1.4\",\n \"towncrier==18.5.0\",\n \"urllib3\",\n \"wheel\",\n \"Jinja2<=3.0.3\", # Jinja v3.1.0 dropped support for python 3.6\n ],\n \"dev\": [\n \"bumpversion\",\n \"flaky>=3.7.0,<4\",\n \"hypothesis>=3.31.2,<6\",\n \"pytest>=4.4.0,<5.0.0\",\n \"pytest-asyncio>=0.10.0,<0.11\",\n \"pytest-mock>=1.10,<2\",\n \"pytest-pythonpath>=0.3\",\n \"pytest-watch>=4.2,<5\",\n \"pytest-xdist>=1.29,<2\",\n \"setuptools>=38.6.0\",\n \"tox>=1.8.0\",\n \"tqdm>4.32,<5\",\n \"twine>=1.13,<2\",\n \"pluggy==0.13.1\",\n \"when-changed>=0.3.0,<0.4\",\n ],\n}\n\nextras_require[\"dev\"] = (\n extras_require[\"tester\"]\n + extras_require[\"linter\"]\n + extras_require[\"docs\"]\n + extras_require[\"dev\"]\n)\n\nwith open(\"./README.md\") as readme:\n long_description = readme.read()\n\nsetup(\n name=\"web3\",\n # *IMPORTANT*: Don't manually change the version here. 
Use the 'bumpversion' utility.\n version=\"5.31.0\",\n description=\"\"\"Web3.py\"\"\",\n long_description_content_type=\"text/markdown\",\n long_description=long_description,\n author=\"Piper Merriam\",\n author_email=\"pipermerriam@gmail.com\",\n url=\"https://github.com/ethereum/web3.py\",\n include_package_data=True,\n install_requires=[\n \"aiohttp>=3.7.4.post0,<4\",\n \"eth-abi>=2.0.0b6,<3.0.0\",\n \"eth-account>=0.5.9,<0.6.0\",\n \"eth-hash[pycryptodome]>=0.2.0,<1.0.0\",\n # eth-account allows too broad of an eth-rlp dependency.\n # This eth-rlp pin can be removed once it gets tightened up in eth-account\n \"eth-rlp<0.3\",\n \"eth-typing>=2.0.0,<3.0.0\",\n \"eth-utils>=1.9.5,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n \"ipfshttpclient==0.8.0a2\",\n \"jsonschema>=3.2.0,<5\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"protobuf>=3.10.0,<4\",\n \"pywin32>=223;platform_system=='Windows'\",\n \"requests>=2.16.0,<3.0.0\",\n # remove typing_extensions after python_requires>=3.8, see web3._utils.compat\n \"typing-extensions>=3.7.4.1,<5;python_version<'3.8'\",\n \"websockets>=9.1,<10\",\n ],\n python_requires=\">=3.6,<4\",\n extras_require=extras_require,\n py_modules=[\"web3\", \"ens\", \"ethpm\"],\n entry_points={\"pytest11\": [\"pytest_ethereum = web3.tools.pytest_ethereum.plugins\"]},\n license=\"MIT\",\n zip_safe=False,\n keywords=\"ethereum\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"web3\": [\"py.typed\"]},\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n)\n", "path": "setup.py"}]}
2,130
158
gh_patches_debug_37097
rasdani/github-patches
git_diff
AUTOMATIC1111__stable-diffusion-webui-12975
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Feature Request]: Where is the save style button? ### Is there an existing issue for this? - [X] I have searched the existing issues and checked the recent builds/commits ### What would your feature do ? Is it possible to make the old implementation of save style as well? Not being able to save the currently typed prompt is very troublesome. Why do we have to open the edit screen and copy/paste the prompt? ### Proposed workflow Restore old implementation of save styles button ### Additional information _No response_ </issue> <code> [start of modules/ui_prompt_styles.py] 1 import gradio as gr 2 3 from modules import shared, ui_common, ui_components, styles 4 5 styles_edit_symbol = '\U0001f58c\uFE0F' # 🖌️ 6 styles_materialize_symbol = '\U0001f4cb' # 📋 7 8 9 def select_style(name): 10 style = shared.prompt_styles.styles.get(name) 11 existing = style is not None 12 empty = not name 13 14 prompt = style.prompt if style else gr.update() 15 negative_prompt = style.negative_prompt if style else gr.update() 16 17 return prompt, negative_prompt, gr.update(visible=existing), gr.update(visible=not empty) 18 19 20 def save_style(name, prompt, negative_prompt): 21 if not name: 22 return gr.update(visible=False) 23 24 style = styles.PromptStyle(name, prompt, negative_prompt) 25 shared.prompt_styles.styles[style.name] = style 26 shared.prompt_styles.save_styles(shared.styles_filename) 27 28 return gr.update(visible=True) 29 30 31 def delete_style(name): 32 if name == "": 33 return 34 35 shared.prompt_styles.styles.pop(name, None) 36 shared.prompt_styles.save_styles(shared.styles_filename) 37 38 return '', '', '' 39 40 41 def materialize_styles(prompt, negative_prompt, styles): 42 prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) 43 negative_prompt = shared.prompt_styles.apply_negative_styles_to_prompt(negative_prompt, styles) 44 45 return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=negative_prompt), gr.Dropdown.update(value=[])] 46 47 48 def refresh_styles(): 49 return gr.update(choices=list(shared.prompt_styles.styles)), gr.update(choices=list(shared.prompt_styles.styles)) 50 51 52 class UiPromptStyles: 53 def __init__(self, tabname, main_ui_prompt, main_ui_negative_prompt): 54 self.tabname = tabname 55 56 with gr.Row(elem_id=f"{tabname}_styles_row"): 57 self.dropdown = gr.Dropdown(label="Styles", show_label=False, elem_id=f"{tabname}_styles", choices=list(shared.prompt_styles.styles), value=[], multiselect=True, tooltip="Styles") 58 edit_button = ui_components.ToolButton(value=styles_edit_symbol, elem_id=f"{tabname}_styles_edit_button", tooltip="Edit styles") 59 60 with gr.Box(elem_id=f"{tabname}_styles_dialog", elem_classes="popup-dialog") as styles_dialog: 61 with gr.Row(): 62 self.selection = gr.Dropdown(label="Styles", elem_id=f"{tabname}_styles_edit_select", choices=list(shared.prompt_styles.styles), value=[], allow_custom_value=True, info="Styles allow you to add custom text to prompt. Use the {prompt} token in style text, and it will be replaced with user's prompt when applying style. 
Otherwise, style's text will be added to the end of the prompt.") 63 ui_common.create_refresh_button([self.dropdown, self.selection], shared.prompt_styles.reload, lambda: {"choices": list(shared.prompt_styles.styles)}, f"refresh_{tabname}_styles") 64 self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f"{tabname}_style_apply", tooltip="Apply all selected styles from the style selction dropdown in main UI to the prompt.") 65 66 with gr.Row(): 67 self.prompt = gr.Textbox(label="Prompt", show_label=True, elem_id=f"{tabname}_edit_style_prompt", lines=3) 68 69 with gr.Row(): 70 self.neg_prompt = gr.Textbox(label="Negative prompt", show_label=True, elem_id=f"{tabname}_edit_style_neg_prompt", lines=3) 71 72 with gr.Row(): 73 self.save = gr.Button('Save', variant='primary', elem_id=f'{tabname}_edit_style_save', visible=False) 74 self.delete = gr.Button('Delete', variant='primary', elem_id=f'{tabname}_edit_style_delete', visible=False) 75 self.close = gr.Button('Close', variant='secondary', elem_id=f'{tabname}_edit_style_close') 76 77 self.selection.change( 78 fn=select_style, 79 inputs=[self.selection], 80 outputs=[self.prompt, self.neg_prompt, self.delete, self.save], 81 show_progress=False, 82 ) 83 84 self.save.click( 85 fn=save_style, 86 inputs=[self.selection, self.prompt, self.neg_prompt], 87 outputs=[self.delete], 88 show_progress=False, 89 ).then(refresh_styles, outputs=[self.dropdown, self.selection], show_progress=False) 90 91 self.delete.click( 92 fn=delete_style, 93 _js='function(name){ if(name == "") return ""; return confirm("Delete style " + name + "?") ? name : ""; }', 94 inputs=[self.selection], 95 outputs=[self.selection, self.prompt, self.neg_prompt], 96 show_progress=False, 97 ).then(refresh_styles, outputs=[self.dropdown, self.selection], show_progress=False) 98 99 self.materialize.click( 100 fn=materialize_styles, 101 inputs=[main_ui_prompt, main_ui_negative_prompt, self.dropdown], 102 outputs=[main_ui_prompt, main_ui_negative_prompt, self.dropdown], 103 show_progress=False, 104 ).then(fn=None, _js="function(){update_"+tabname+"_tokens(); closePopup();}", show_progress=False) 105 106 ui_common.setup_dialog(button_show=edit_button, dialog=styles_dialog, button_close=self.close) 107 108 109 110 111 [end of modules/ui_prompt_styles.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/modules/ui_prompt_styles.py b/modules/ui_prompt_styles.py --- a/modules/ui_prompt_styles.py +++ b/modules/ui_prompt_styles.py @@ -4,6 +4,7 @@ styles_edit_symbol = '\U0001f58c\uFE0F' # 🖌️ styles_materialize_symbol = '\U0001f4cb' # 📋 +styles_copy_symbol = '\U0001f4dd' # 📝 def select_style(name): @@ -62,6 +63,7 @@ self.selection = gr.Dropdown(label="Styles", elem_id=f"{tabname}_styles_edit_select", choices=list(shared.prompt_styles.styles), value=[], allow_custom_value=True, info="Styles allow you to add custom text to prompt. Use the {prompt} token in style text, and it will be replaced with user's prompt when applying style. Otherwise, style's text will be added to the end of the prompt.") ui_common.create_refresh_button([self.dropdown, self.selection], shared.prompt_styles.reload, lambda: {"choices": list(shared.prompt_styles.styles)}, f"refresh_{tabname}_styles") self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f"{tabname}_style_apply", tooltip="Apply all selected styles from the style selction dropdown in main UI to the prompt.") + self.copy = ui_components.ToolButton(value=styles_copy_symbol, elem_id=f"{tabname}_style_copy", tooltip="Copy main UI prompt to style.") with gr.Row(): self.prompt = gr.Textbox(label="Prompt", show_label=True, elem_id=f"{tabname}_edit_style_prompt", lines=3) @@ -103,6 +105,13 @@ show_progress=False, ).then(fn=None, _js="function(){update_"+tabname+"_tokens(); closePopup();}", show_progress=False) + self.copy.click( + fn=lambda p, n: (p, n), + inputs=[main_ui_prompt, main_ui_negative_prompt], + outputs=[self.prompt, self.neg_prompt], + show_progress=False, + ) + ui_common.setup_dialog(button_show=edit_button, dialog=styles_dialog, button_close=self.close)
{"golden_diff": "diff --git a/modules/ui_prompt_styles.py b/modules/ui_prompt_styles.py\n--- a/modules/ui_prompt_styles.py\n+++ b/modules/ui_prompt_styles.py\n@@ -4,6 +4,7 @@\n \r\n styles_edit_symbol = '\\U0001f58c\\uFE0F' # \ud83d\udd8c\ufe0f\r\n styles_materialize_symbol = '\\U0001f4cb' # \ud83d\udccb\r\n+styles_copy_symbol = '\\U0001f4dd' # \ud83d\udcdd\r\n \r\n \r\n def select_style(name):\r\n@@ -62,6 +63,7 @@\n self.selection = gr.Dropdown(label=\"Styles\", elem_id=f\"{tabname}_styles_edit_select\", choices=list(shared.prompt_styles.styles), value=[], allow_custom_value=True, info=\"Styles allow you to add custom text to prompt. Use the {prompt} token in style text, and it will be replaced with user's prompt when applying style. Otherwise, style's text will be added to the end of the prompt.\")\r\n ui_common.create_refresh_button([self.dropdown, self.selection], shared.prompt_styles.reload, lambda: {\"choices\": list(shared.prompt_styles.styles)}, f\"refresh_{tabname}_styles\")\r\n self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f\"{tabname}_style_apply\", tooltip=\"Apply all selected styles from the style selction dropdown in main UI to the prompt.\")\r\n+ self.copy = ui_components.ToolButton(value=styles_copy_symbol, elem_id=f\"{tabname}_style_copy\", tooltip=\"Copy main UI prompt to style.\")\r\n \r\n with gr.Row():\r\n self.prompt = gr.Textbox(label=\"Prompt\", show_label=True, elem_id=f\"{tabname}_edit_style_prompt\", lines=3)\r\n@@ -103,6 +105,13 @@\n show_progress=False,\r\n ).then(fn=None, _js=\"function(){update_\"+tabname+\"_tokens(); closePopup();}\", show_progress=False)\r\n \r\n+ self.copy.click(\r\n+ fn=lambda p, n: (p, n),\r\n+ inputs=[main_ui_prompt, main_ui_negative_prompt],\r\n+ outputs=[self.prompt, self.neg_prompt],\r\n+ show_progress=False,\r\n+ )\r\n+\r\n ui_common.setup_dialog(button_show=edit_button, dialog=styles_dialog, button_close=self.close)\n", "issue": "[Feature Request]: Where is the save style button?\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues and checked the recent builds/commits\n\n### What would your feature do ?\n\nIs it possible to make the old implementation of save style as well?\r\nNot being able to save the currently typed prompt is very troublesome.\r\nWhy do we have to open the edit screen and copy/paste the prompt?\n\n### Proposed workflow\n\nRestore old implementation of save styles button\n\n### Additional information\n\n_No response_\n", "before_files": [{"content": "import gradio as gr\r\n\r\nfrom modules import shared, ui_common, ui_components, styles\r\n\r\nstyles_edit_symbol = '\\U0001f58c\\uFE0F' # \ud83d\udd8c\ufe0f\r\nstyles_materialize_symbol = '\\U0001f4cb' # \ud83d\udccb\r\n\r\n\r\ndef select_style(name):\r\n style = shared.prompt_styles.styles.get(name)\r\n existing = style is not None\r\n empty = not name\r\n\r\n prompt = style.prompt if style else gr.update()\r\n negative_prompt = style.negative_prompt if style else gr.update()\r\n\r\n return prompt, negative_prompt, gr.update(visible=existing), gr.update(visible=not empty)\r\n\r\n\r\ndef save_style(name, prompt, negative_prompt):\r\n if not name:\r\n return gr.update(visible=False)\r\n\r\n style = styles.PromptStyle(name, prompt, negative_prompt)\r\n shared.prompt_styles.styles[style.name] = style\r\n shared.prompt_styles.save_styles(shared.styles_filename)\r\n\r\n return gr.update(visible=True)\r\n\r\n\r\ndef delete_style(name):\r\n if name == \"\":\r\n return\r\n\r\n 
shared.prompt_styles.styles.pop(name, None)\r\n shared.prompt_styles.save_styles(shared.styles_filename)\r\n\r\n return '', '', ''\r\n\r\n\r\ndef materialize_styles(prompt, negative_prompt, styles):\r\n prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles)\r\n negative_prompt = shared.prompt_styles.apply_negative_styles_to_prompt(negative_prompt, styles)\r\n\r\n return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=negative_prompt), gr.Dropdown.update(value=[])]\r\n\r\n\r\ndef refresh_styles():\r\n return gr.update(choices=list(shared.prompt_styles.styles)), gr.update(choices=list(shared.prompt_styles.styles))\r\n\r\n\r\nclass UiPromptStyles:\r\n def __init__(self, tabname, main_ui_prompt, main_ui_negative_prompt):\r\n self.tabname = tabname\r\n\r\n with gr.Row(elem_id=f\"{tabname}_styles_row\"):\r\n self.dropdown = gr.Dropdown(label=\"Styles\", show_label=False, elem_id=f\"{tabname}_styles\", choices=list(shared.prompt_styles.styles), value=[], multiselect=True, tooltip=\"Styles\")\r\n edit_button = ui_components.ToolButton(value=styles_edit_symbol, elem_id=f\"{tabname}_styles_edit_button\", tooltip=\"Edit styles\")\r\n\r\n with gr.Box(elem_id=f\"{tabname}_styles_dialog\", elem_classes=\"popup-dialog\") as styles_dialog:\r\n with gr.Row():\r\n self.selection = gr.Dropdown(label=\"Styles\", elem_id=f\"{tabname}_styles_edit_select\", choices=list(shared.prompt_styles.styles), value=[], allow_custom_value=True, info=\"Styles allow you to add custom text to prompt. Use the {prompt} token in style text, and it will be replaced with user's prompt when applying style. Otherwise, style's text will be added to the end of the prompt.\")\r\n ui_common.create_refresh_button([self.dropdown, self.selection], shared.prompt_styles.reload, lambda: {\"choices\": list(shared.prompt_styles.styles)}, f\"refresh_{tabname}_styles\")\r\n self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f\"{tabname}_style_apply\", tooltip=\"Apply all selected styles from the style selction dropdown in main UI to the prompt.\")\r\n\r\n with gr.Row():\r\n self.prompt = gr.Textbox(label=\"Prompt\", show_label=True, elem_id=f\"{tabname}_edit_style_prompt\", lines=3)\r\n\r\n with gr.Row():\r\n self.neg_prompt = gr.Textbox(label=\"Negative prompt\", show_label=True, elem_id=f\"{tabname}_edit_style_neg_prompt\", lines=3)\r\n\r\n with gr.Row():\r\n self.save = gr.Button('Save', variant='primary', elem_id=f'{tabname}_edit_style_save', visible=False)\r\n self.delete = gr.Button('Delete', variant='primary', elem_id=f'{tabname}_edit_style_delete', visible=False)\r\n self.close = gr.Button('Close', variant='secondary', elem_id=f'{tabname}_edit_style_close')\r\n\r\n self.selection.change(\r\n fn=select_style,\r\n inputs=[self.selection],\r\n outputs=[self.prompt, self.neg_prompt, self.delete, self.save],\r\n show_progress=False,\r\n )\r\n\r\n self.save.click(\r\n fn=save_style,\r\n inputs=[self.selection, self.prompt, self.neg_prompt],\r\n outputs=[self.delete],\r\n show_progress=False,\r\n ).then(refresh_styles, outputs=[self.dropdown, self.selection], show_progress=False)\r\n\r\n self.delete.click(\r\n fn=delete_style,\r\n _js='function(name){ if(name == \"\") return \"\"; return confirm(\"Delete style \" + name + \"?\") ? 
name : \"\"; }',\r\n inputs=[self.selection],\r\n outputs=[self.selection, self.prompt, self.neg_prompt],\r\n show_progress=False,\r\n ).then(refresh_styles, outputs=[self.dropdown, self.selection], show_progress=False)\r\n\r\n self.materialize.click(\r\n fn=materialize_styles,\r\n inputs=[main_ui_prompt, main_ui_negative_prompt, self.dropdown],\r\n outputs=[main_ui_prompt, main_ui_negative_prompt, self.dropdown],\r\n show_progress=False,\r\n ).then(fn=None, _js=\"function(){update_\"+tabname+\"_tokens(); closePopup();}\", show_progress=False)\r\n\r\n ui_common.setup_dialog(button_show=edit_button, dialog=styles_dialog, button_close=self.close)\r\n\r\n\r\n\r\n\r\n", "path": "modules/ui_prompt_styles.py"}]}
1,986
490
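The ui_prompt_styles.py patch restores a one-click path from the main prompt boxes into the style editor: a copy tool button whose click handler is just a passthrough lambda from `(prompt, negative_prompt)` to the editor's two textboxes. The same wiring in a stripped-down Gradio app (hypothetical component names, not the real web UI layout) looks like:

```python
import gradio as gr

with gr.Blocks() as demo:
    main_prompt = gr.Textbox(label="Prompt")
    main_negative = gr.Textbox(label="Negative prompt")
    style_prompt = gr.Textbox(label="Style prompt")
    style_negative = gr.Textbox(label="Style negative prompt")
    copy_btn = gr.Button("Copy prompts to style")
    # Passthrough: whatever is typed in the main boxes lands in the style boxes.
    copy_btn.click(fn=lambda p, n: (p, n),
                   inputs=[main_prompt, main_negative],
                   outputs=[style_prompt, style_negative])

demo.launch()
```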
gh_patches_debug_11197
rasdani/github-patches
git_diff
ESMCI__cime-2860
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> floating point mpiprocs when running ./case.setup with python3 I noticed that when running with python3, mpiprocs is set to be a float, i.e., $ python3 ./case.setup # will create the following in .case.run: #PBS -l select=5:ncpus=36:mpiprocs=36.0:ompthreads=1 $ python2 ./case.setup # will create the following .case.run: #PBS -l select=5:ncpus=36:mpiprocs=36:ompthreads=1 NOTE: You'll need to rm .case.run, in between ./case.setup executions to see the difference. I haven't looked this into depth, but I bet it has to do with "true division" that comes with python3. </issue> <code> [start of scripts/lib/CIME/XML/env_mach_pes.py] 1 """ 2 Interface to the env_mach_pes.xml file. This class inherits from EntryID 3 """ 4 from CIME.XML.standard_module_setup import * 5 from CIME.XML.env_base import EnvBase 6 import math 7 8 logger = logging.getLogger(__name__) 9 10 class EnvMachPes(EnvBase): 11 12 def __init__(self, case_root=None, infile="env_mach_pes.xml", components=None): 13 """ 14 initialize an object interface to file env_mach_pes.xml in the case directory 15 """ 16 self._components = components 17 schema = os.path.join(get_cime_root(), "config", "xml_schemas", "env_mach_pes.xsd") 18 EnvBase.__init__(self, case_root, infile, schema=schema) 19 20 def add_comment(self, comment): 21 if comment is not None: 22 node = self.make_child("comment", text=comment) 23 # make_child adds to the end of the file but we want it to follow the header 24 # so we need to remove it and add it in the correct position 25 self.remove_child(node) 26 self.add_child(node, position=1) 27 28 def get_value(self, vid, attribute=None, resolved=True, subgroup=None, max_mpitasks_per_node=None): # pylint: disable=arguments-differ 29 # Special variable NINST_MAX is used to determine the number of 30 # drivers in multi-driver mode. 31 if vid == "NINST_MAX": 32 value = 1 33 for comp in self._components: 34 if comp != "CPL": 35 value = max(value, self.get_value("NINST_{}".format(comp))) 36 return value 37 38 value = EnvBase.get_value(self, vid, attribute, resolved, subgroup) 39 40 if "NTASKS" in vid or "ROOTPE" in vid: 41 if max_mpitasks_per_node is None: 42 max_mpitasks_per_node = self.get_value("MAX_MPITASKS_PER_NODE") 43 if value is not None and value < 0: 44 value = -1*value*max_mpitasks_per_node 45 46 return value 47 48 def set_value(self, vid, value, subgroup=None, ignore_type=False): 49 """ 50 Set the value of an entry-id field to value 51 Returns the value or None if not found 52 subgroup is ignored in the general routine and applied in specific methods 53 """ 54 if vid == "MULTI_DRIVER" and value: 55 ninst_max = self.get_value("NINST_MAX") 56 for comp in self._components: 57 if comp == "CPL": 58 continue 59 ninst = self.get_value("NINST_{}".format(comp)) 60 expect(ninst == ninst_max, 61 "All components must have the same NINST value in multi_driver mode. 
NINST_{}={} shoud be {}".format(comp,ninst,ninst_max)) 62 if "NTASKS" in vid or "NTHRDS" in vid: 63 expect(value != 0, "Cannot set NTASKS or NTHRDS to 0") 64 65 66 return EnvBase.set_value(self, vid, value, subgroup=subgroup, ignore_type=ignore_type) 67 68 69 def get_max_thread_count(self, comp_classes): 70 ''' Find the maximum number of openmp threads for any component in the case ''' 71 max_threads = 1 72 for comp in comp_classes: 73 threads = self.get_value("NTHRDS",attribute={"compclass":comp}) 74 expect(threads is not None, "Error no thread count found for component class {}".format(comp)) 75 if threads > max_threads: 76 max_threads = threads 77 return max_threads 78 79 def get_total_tasks(self, comp_classes): 80 total_tasks = 0 81 maxinst = 1 82 for comp in comp_classes: 83 ntasks = self.get_value("NTASKS", attribute={"compclass":comp}) 84 rootpe = self.get_value("ROOTPE", attribute={"compclass":comp}) 85 pstrid = self.get_value("PSTRID", attribute={"compclass":comp}) 86 if comp != "CPL": 87 ninst = self.get_value("NINST", attribute={"compclass":comp}) 88 maxinst = max(maxinst, ninst) 89 tt = rootpe + (ntasks - 1) * pstrid + 1 90 total_tasks = max(tt, total_tasks) 91 if self.get_value("MULTI_DRIVER"): 92 total_tasks *= maxinst 93 return total_tasks 94 95 def get_tasks_per_node(self, total_tasks, max_thread_count): 96 expect(total_tasks > 0,"totaltasks > 0 expected, totaltasks = {}".format(total_tasks)) 97 tasks_per_node = min(self.get_value("MAX_TASKS_PER_NODE")/ max_thread_count, 98 self.get_value("MAX_MPITASKS_PER_NODE"), total_tasks) 99 return tasks_per_node if tasks_per_node > 0 else 1 100 101 def get_total_nodes(self, total_tasks, max_thread_count): 102 """ 103 Return (num_active_nodes, num_spare_nodes) 104 """ 105 tasks_per_node = self.get_tasks_per_node(total_tasks, max_thread_count) 106 num_nodes = int(math.ceil(float(total_tasks) / tasks_per_node)) 107 return num_nodes, self.get_spare_nodes(num_nodes) 108 109 def get_spare_nodes(self, num_nodes): 110 force_spare_nodes = self.get_value("FORCE_SPARE_NODES") 111 if force_spare_nodes != -999: 112 return force_spare_nodes 113 114 if self.get_value("ALLOCATE_SPARE_NODES"): 115 ten_pct = int(math.ceil(float(num_nodes) * 0.1)) 116 if ten_pct < 1: 117 return 1 # Always provide at lease one spare node 118 elif ten_pct > 10: 119 return 10 # Never provide more than 10 spare nodes 120 else: 121 return ten_pct 122 else: 123 return 0 124 [end of scripts/lib/CIME/XML/env_mach_pes.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scripts/lib/CIME/XML/env_mach_pes.py b/scripts/lib/CIME/XML/env_mach_pes.py --- a/scripts/lib/CIME/XML/env_mach_pes.py +++ b/scripts/lib/CIME/XML/env_mach_pes.py @@ -94,7 +94,7 @@ def get_tasks_per_node(self, total_tasks, max_thread_count): expect(total_tasks > 0,"totaltasks > 0 expected, totaltasks = {}".format(total_tasks)) - tasks_per_node = min(self.get_value("MAX_TASKS_PER_NODE")/ max_thread_count, + tasks_per_node = min(self.get_value("MAX_TASKS_PER_NODE")// max_thread_count, self.get_value("MAX_MPITASKS_PER_NODE"), total_tasks) return tasks_per_node if tasks_per_node > 0 else 1
{"golden_diff": "diff --git a/scripts/lib/CIME/XML/env_mach_pes.py b/scripts/lib/CIME/XML/env_mach_pes.py\n--- a/scripts/lib/CIME/XML/env_mach_pes.py\n+++ b/scripts/lib/CIME/XML/env_mach_pes.py\n@@ -94,7 +94,7 @@\n \n def get_tasks_per_node(self, total_tasks, max_thread_count):\n expect(total_tasks > 0,\"totaltasks > 0 expected, totaltasks = {}\".format(total_tasks))\n- tasks_per_node = min(self.get_value(\"MAX_TASKS_PER_NODE\")/ max_thread_count,\n+ tasks_per_node = min(self.get_value(\"MAX_TASKS_PER_NODE\")// max_thread_count,\n self.get_value(\"MAX_MPITASKS_PER_NODE\"), total_tasks)\n return tasks_per_node if tasks_per_node > 0 else 1\n", "issue": "floating point mpiprocs when running ./case.setup with python3\nI noticed that when running with python3, mpiprocs is set to be a float, i.e.,\r\n\r\n$ python3 ./case.setup # will create the following in .case.run:\r\n#PBS -l select=5:ncpus=36:mpiprocs=36.0:ompthreads=1\r\n\r\n$ python2 ./case.setup # will create the following .case.run:\r\n#PBS -l select=5:ncpus=36:mpiprocs=36:ompthreads=1\r\n\r\nNOTE: You'll need to rm .case.run, in between ./case.setup executions to see the difference.\r\n\r\nI haven't looked this into depth, but I bet it has to do with \"true division\" that comes with python3.\n", "before_files": [{"content": "\"\"\"\nInterface to the env_mach_pes.xml file. This class inherits from EntryID\n\"\"\"\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.XML.env_base import EnvBase\nimport math\n\nlogger = logging.getLogger(__name__)\n\nclass EnvMachPes(EnvBase):\n\n def __init__(self, case_root=None, infile=\"env_mach_pes.xml\", components=None):\n \"\"\"\n initialize an object interface to file env_mach_pes.xml in the case directory\n \"\"\"\n self._components = components\n schema = os.path.join(get_cime_root(), \"config\", \"xml_schemas\", \"env_mach_pes.xsd\")\n EnvBase.__init__(self, case_root, infile, schema=schema)\n\n def add_comment(self, comment):\n if comment is not None:\n node = self.make_child(\"comment\", text=comment)\n # make_child adds to the end of the file but we want it to follow the header\n # so we need to remove it and add it in the correct position\n self.remove_child(node)\n self.add_child(node, position=1)\n\n def get_value(self, vid, attribute=None, resolved=True, subgroup=None, max_mpitasks_per_node=None): # pylint: disable=arguments-differ\n # Special variable NINST_MAX is used to determine the number of\n # drivers in multi-driver mode.\n if vid == \"NINST_MAX\":\n value = 1\n for comp in self._components:\n if comp != \"CPL\":\n value = max(value, self.get_value(\"NINST_{}\".format(comp)))\n return value\n\n value = EnvBase.get_value(self, vid, attribute, resolved, subgroup)\n\n if \"NTASKS\" in vid or \"ROOTPE\" in vid:\n if max_mpitasks_per_node is None:\n max_mpitasks_per_node = self.get_value(\"MAX_MPITASKS_PER_NODE\")\n if value is not None and value < 0:\n value = -1*value*max_mpitasks_per_node\n\n return value\n\n def set_value(self, vid, value, subgroup=None, ignore_type=False):\n \"\"\"\n Set the value of an entry-id field to value\n Returns the value or None if not found\n subgroup is ignored in the general routine and applied in specific methods\n \"\"\"\n if vid == \"MULTI_DRIVER\" and value:\n ninst_max = self.get_value(\"NINST_MAX\")\n for comp in self._components:\n if comp == \"CPL\":\n continue\n ninst = self.get_value(\"NINST_{}\".format(comp))\n expect(ninst == ninst_max,\n \"All components must have the same NINST value in multi_driver mode. 
NINST_{}={} shoud be {}\".format(comp,ninst,ninst_max))\n if \"NTASKS\" in vid or \"NTHRDS\" in vid:\n expect(value != 0, \"Cannot set NTASKS or NTHRDS to 0\")\n\n\n return EnvBase.set_value(self, vid, value, subgroup=subgroup, ignore_type=ignore_type)\n\n\n def get_max_thread_count(self, comp_classes):\n ''' Find the maximum number of openmp threads for any component in the case '''\n max_threads = 1\n for comp in comp_classes:\n threads = self.get_value(\"NTHRDS\",attribute={\"compclass\":comp})\n expect(threads is not None, \"Error no thread count found for component class {}\".format(comp))\n if threads > max_threads:\n max_threads = threads\n return max_threads\n\n def get_total_tasks(self, comp_classes):\n total_tasks = 0\n maxinst = 1\n for comp in comp_classes:\n ntasks = self.get_value(\"NTASKS\", attribute={\"compclass\":comp})\n rootpe = self.get_value(\"ROOTPE\", attribute={\"compclass\":comp})\n pstrid = self.get_value(\"PSTRID\", attribute={\"compclass\":comp})\n if comp != \"CPL\":\n ninst = self.get_value(\"NINST\", attribute={\"compclass\":comp})\n maxinst = max(maxinst, ninst)\n tt = rootpe + (ntasks - 1) * pstrid + 1\n total_tasks = max(tt, total_tasks)\n if self.get_value(\"MULTI_DRIVER\"):\n total_tasks *= maxinst\n return total_tasks\n\n def get_tasks_per_node(self, total_tasks, max_thread_count):\n expect(total_tasks > 0,\"totaltasks > 0 expected, totaltasks = {}\".format(total_tasks))\n tasks_per_node = min(self.get_value(\"MAX_TASKS_PER_NODE\")/ max_thread_count,\n self.get_value(\"MAX_MPITASKS_PER_NODE\"), total_tasks)\n return tasks_per_node if tasks_per_node > 0 else 1\n\n def get_total_nodes(self, total_tasks, max_thread_count):\n \"\"\"\n Return (num_active_nodes, num_spare_nodes)\n \"\"\"\n tasks_per_node = self.get_tasks_per_node(total_tasks, max_thread_count)\n num_nodes = int(math.ceil(float(total_tasks) / tasks_per_node))\n return num_nodes, self.get_spare_nodes(num_nodes)\n\n def get_spare_nodes(self, num_nodes):\n force_spare_nodes = self.get_value(\"FORCE_SPARE_NODES\")\n if force_spare_nodes != -999:\n return force_spare_nodes\n\n if self.get_value(\"ALLOCATE_SPARE_NODES\"):\n ten_pct = int(math.ceil(float(num_nodes) * 0.1))\n if ten_pct < 1:\n return 1 # Always provide at lease one spare node\n elif ten_pct > 10:\n return 10 # Never provide more than 10 spare nodes\n else:\n return ten_pct\n else:\n return 0\n", "path": "scripts/lib/CIME/XML/env_mach_pes.py"}]}
2236
179
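A short aside on the record above: its one-line fix swaps `/` for `//` in `get_tasks_per_node`, because Python 3 true division always produces a float even when both operands are integers. The difference is easy to reproduce in isolation; the values below are illustrative, not taken from any machine configuration:

```python
max_tasks_per_node = 36
max_thread_count = 1

# Python 3 "true division" always yields a float, which later leaks into
# the generated PBS directive as mpiprocs=36.0
tasks_per_node = max_tasks_per_node / max_thread_count
print(tasks_per_node)   # 36.0

# Floor division keeps integer operands integral, matching the Python 2
# behaviour the batch template expected
tasks_per_node = max_tasks_per_node // max_thread_count
print(tasks_per_node)   # 36
```

With the floor-division form, the task count stays an `int`, so the `.case.run` file no longer carries the trailing `.0` described in the issue.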
gh_patches_debug_4066
rasdani/github-patches
git_diff
sublimelsp__LSP-1243
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Server is being shutdown on server sending empty stderr line When server triggers stderr output that is an empty string (or becomes an empty string after `rstrip` then LSP closes the transports and thus the server. Issue found when analyzing https://github.com/sublimelsp/LSP-angular/issues/1 </issue> <code> [start of plugin/core/transports.py] 1 from .logging import exception_log, debug 2 from .types import ClientConfig 3 from .typing import Dict, Any, Optional, IO, Protocol 4 from abc import ABCMeta, abstractmethod 5 from contextlib import closing 6 from queue import Queue 7 import json 8 import os 9 import shutil 10 import socket 11 import sublime 12 import subprocess 13 import threading 14 import time 15 import weakref 16 17 18 TCP_CONNECT_TIMEOUT = 5 19 20 21 class Transport(metaclass=ABCMeta): 22 23 @abstractmethod 24 def send(self, payload: Dict[str, Any]) -> None: 25 pass 26 27 @abstractmethod 28 def close(self) -> None: 29 pass 30 31 32 class TransportCallbacks(Protocol): 33 34 def on_transport_close(self, exit_code: int, exception: Optional[Exception]) -> None: 35 ... 36 37 def on_payload(self, payload: Dict[str, Any]) -> None: 38 ... 39 40 def on_stderr_message(self, message: str) -> None: 41 ... 42 43 44 class JsonRpcTransport(Transport): 45 46 def __init__(self, name: str, process: subprocess.Popen, socket: Optional[socket.socket], reader: IO[bytes], 47 writer: IO[bytes], stderr: Optional[IO[bytes]], callback_object: TransportCallbacks) -> None: 48 self._closed = False 49 self._process = process 50 self._socket = socket 51 self._reader = reader 52 self._writer = writer 53 self._stderr = stderr 54 self._reader_thread = threading.Thread(target=self._read_loop, name='{}-reader'.format(name)) 55 self._writer_thread = threading.Thread(target=self._write_loop, name='{}-writer'.format(name)) 56 self._stderr_thread = threading.Thread(target=self._stderr_loop, name='{}-stderr'.format(name)) 57 self._callback_object = weakref.ref(callback_object) 58 self._send_queue = Queue(0) # type: Queue[Optional[Dict[str, Any]]] 59 self._reader_thread.start() 60 self._writer_thread.start() 61 self._stderr_thread.start() 62 63 def send(self, payload: Dict[str, Any]) -> None: 64 self._send_queue.put_nowait(payload) 65 66 def close(self) -> None: 67 if not self._closed: 68 self._send_queue.put_nowait(None) 69 if self._socket: 70 self._socket.close() 71 self._closed = True 72 73 def _join_thread(self, t: threading.Thread) -> None: 74 if t.ident == threading.current_thread().ident: 75 return 76 try: 77 t.join(2) 78 except TimeoutError as ex: 79 exception_log("failed to join {} thread".format(t.name), ex) 80 81 def __del__(self) -> None: 82 self.close() 83 self._join_thread(self._writer_thread) 84 self._join_thread(self._reader_thread) 85 self._join_thread(self._stderr_thread) 86 87 def _read_loop(self) -> None: 88 try: 89 while self._reader: 90 line = self._reader.readline() 91 if not line: 92 break 93 try: 94 num_bytes = _content_length(line) 95 except ValueError: 96 continue 97 if num_bytes is None: 98 continue 99 while line and line.strip(): 100 line = self._reader.readline() 101 if not line: 102 continue 103 body = self._reader.read(num_bytes) 104 callback_object = self._callback_object() 105 if callback_object: 106 try: 107 callback_object.on_payload(_decode(body)) 108 except Exception as ex: 109 exception_log("Error handling payload", ex) 110 else: 111 break 112 except (AttributeError, 
BrokenPipeError): 113 pass 114 except Exception as ex: 115 exception_log("Unexpected exception", ex) 116 self._send_queue.put_nowait(None) 117 118 def _end(self, exception: Optional[Exception]) -> None: 119 exit_code = 0 120 if not exception: 121 try: 122 # Allow the process to stop itself. 123 exit_code = self._process.wait(1) 124 except (AttributeError, ProcessLookupError, subprocess.TimeoutExpired): 125 pass 126 if self._process: 127 try: 128 # The process didn't stop itself. Terminate! 129 self._process.kill() 130 # still wait for the process to die, or zombie processes might be the result 131 # Ignore the exit code in this case, it's going to be something non-zero because we sent SIGKILL. 132 self._process.wait() 133 except (AttributeError, ProcessLookupError): 134 pass 135 except Exception as ex: 136 exception = ex # TODO: Old captured exception is overwritten 137 callback_object = self._callback_object() 138 if callback_object: 139 callback_object.on_transport_close(exit_code, exception) 140 141 def _write_loop(self) -> None: 142 exception = None # type: Optional[Exception] 143 try: 144 while self._writer: 145 d = self._send_queue.get() 146 if d is None: 147 break 148 body = _encode(d) 149 self._writer.writelines(("Content-Length: {}\r\n\r\n".format(len(body)).encode('ascii'), body)) 150 self._writer.flush() 151 except (BrokenPipeError, AttributeError): 152 pass 153 except Exception as ex: 154 exception = ex 155 self._end(exception) 156 157 def _stderr_loop(self) -> None: 158 try: 159 while self._stderr: 160 if self._closed: 161 break 162 message = self._stderr.readline().decode('utf-8', 'replace').rstrip() 163 callback_object = self._callback_object() 164 if callback_object: 165 callback_object.on_stderr_message(message) 166 else: 167 break 168 except (BrokenPipeError, AttributeError): 169 pass 170 except Exception as ex: 171 exception_log('unexpected exception type in stderr loop', ex) 172 self._send_queue.put_nowait(None) 173 174 175 def create_transport(config: ClientConfig, cwd: Optional[str], window: sublime.Window, 176 callback_object: TransportCallbacks, variables: Dict[str, str]) -> JsonRpcTransport: 177 tcp_port = None # type: Optional[int] 178 if config.tcp_port is not None: 179 tcp_port = _find_free_port() if config.tcp_port == 0 else config.tcp_port 180 if tcp_port is not None: 181 variables["port"] = str(tcp_port) 182 args = sublime.expand_variables(config.binary_args, variables) 183 args = [os.path.expanduser(arg) for arg in args] 184 if tcp_port is not None: 185 # DEPRECATED -- replace {port} with $port or ${port} in your client config 186 args = [a.replace('{port}', str(tcp_port)) for a in args] 187 env = os.environ.copy() 188 for var, value in config.env.items(): 189 env[var] = sublime.expand_variables(value, variables) 190 if tcp_port is not None: 191 stdout = subprocess.DEVNULL 192 stdin = subprocess.DEVNULL 193 else: 194 stdout = subprocess.PIPE 195 stdin = subprocess.PIPE 196 if sublime.platform() == "windows": 197 startupinfo = subprocess.STARTUPINFO() # type: ignore 198 startupinfo.dwFlags |= subprocess.SW_HIDE | subprocess.STARTF_USESHOWWINDOW # type: ignore 199 executable_arg = args[0] 200 fname, ext = os.path.splitext(executable_arg) 201 if len(ext) < 1: 202 path_to_executable = shutil.which(executable_arg) 203 # what extensions should we append so CreateProcess can find it? 
204 # node has .cmd 205 # dart has .bat 206 # python has .exe wrappers - not needed 207 for extension in ['.cmd', '.bat']: 208 if path_to_executable and path_to_executable.lower().endswith(extension): 209 args[0] = executable_arg + extension 210 break 211 else: 212 startupinfo = None 213 debug("starting {} in {}".format(args, cwd if cwd else os.getcwd())) 214 process = subprocess.Popen( 215 args=args, 216 stdin=stdin, 217 stdout=stdout, 218 stderr=subprocess.PIPE, 219 startupinfo=startupinfo, 220 env=env, 221 cwd=cwd) 222 _subprocesses.add(process) 223 sock = None # type: Optional[socket.socket] 224 if tcp_port: 225 sock = _connect_tcp(tcp_port) 226 if sock is None: 227 raise RuntimeError("Failed to connect on port {}".format(config.tcp_port)) 228 reader = sock.makefile('rwb') # type: IO[bytes] 229 writer = reader 230 else: 231 reader = process.stdout # type: ignore 232 writer = process.stdin # type: ignore 233 return JsonRpcTransport(config.name, process, sock, reader, writer, process.stderr, callback_object) 234 235 236 _subprocesses = weakref.WeakSet() # type: weakref.WeakSet[subprocess.Popen] 237 238 239 def kill_all_subprocesses() -> None: 240 global _subprocesses 241 subprocesses = list(_subprocesses) 242 for p in subprocesses: 243 try: 244 p.kill() 245 except Exception: 246 pass 247 for p in subprocesses: 248 try: 249 p.wait() 250 except Exception: 251 pass 252 253 254 def _connect_tcp(port: int) -> Optional[socket.socket]: 255 start_time = time.time() 256 while time.time() - start_time < TCP_CONNECT_TIMEOUT: 257 try: 258 return socket.create_connection(('localhost', port)) 259 except ConnectionRefusedError: 260 pass 261 return None 262 263 264 def _find_free_port() -> int: 265 with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: 266 s.bind(('', 0)) 267 s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 268 return s.getsockname()[1] 269 270 271 def _encode(d: Dict[str, Any]) -> bytes: 272 return json.dumps(d, sort_keys=False, check_circular=False, separators=(',', ':')).encode('utf-8') 273 274 275 def _decode(message: bytes) -> Dict[str, Any]: 276 return json.loads(message.decode('utf-8')) 277 278 279 def _content_length(line: bytes) -> Optional[int]: 280 if line.startswith(b'Content-Length: '): 281 _, value = line.split(b'Content-Length: ') 282 value = value.strip() 283 try: 284 return int(value) 285 except ValueError as ex: 286 raise ValueError("Invalid Content-Length header: {}".format(value.decode('ascii'))) from ex 287 return None 288 [end of plugin/core/transports.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plugin/core/transports.py b/plugin/core/transports.py --- a/plugin/core/transports.py +++ b/plugin/core/transports.py @@ -137,6 +137,7 @@ callback_object = self._callback_object() if callback_object: callback_object.on_transport_close(exit_code, exception) + self.close() def _write_loop(self) -> None: exception = None # type: Optional[Exception]
{"golden_diff": "diff --git a/plugin/core/transports.py b/plugin/core/transports.py\n--- a/plugin/core/transports.py\n+++ b/plugin/core/transports.py\n@@ -137,6 +137,7 @@\n callback_object = self._callback_object()\n if callback_object:\n callback_object.on_transport_close(exit_code, exception)\n+ self.close()\n \n def _write_loop(self) -> None:\n exception = None # type: Optional[Exception]\n", "issue": "Server is being shutdown on server sending empty stderr line\nWhen server triggers stderr output that is an empty string (or becomes an empty string after `rstrip` then LSP closes the transports and thus the server.\r\n\r\nIssue found when analyzing https://github.com/sublimelsp/LSP-angular/issues/1\n", "before_files": [{"content": "from .logging import exception_log, debug\nfrom .types import ClientConfig\nfrom .typing import Dict, Any, Optional, IO, Protocol\nfrom abc import ABCMeta, abstractmethod\nfrom contextlib import closing\nfrom queue import Queue\nimport json\nimport os\nimport shutil\nimport socket\nimport sublime\nimport subprocess\nimport threading\nimport time\nimport weakref\n\n\nTCP_CONNECT_TIMEOUT = 5\n\n\nclass Transport(metaclass=ABCMeta):\n\n @abstractmethod\n def send(self, payload: Dict[str, Any]) -> None:\n pass\n\n @abstractmethod\n def close(self) -> None:\n pass\n\n\nclass TransportCallbacks(Protocol):\n\n def on_transport_close(self, exit_code: int, exception: Optional[Exception]) -> None:\n ...\n\n def on_payload(self, payload: Dict[str, Any]) -> None:\n ...\n\n def on_stderr_message(self, message: str) -> None:\n ...\n\n\nclass JsonRpcTransport(Transport):\n\n def __init__(self, name: str, process: subprocess.Popen, socket: Optional[socket.socket], reader: IO[bytes],\n writer: IO[bytes], stderr: Optional[IO[bytes]], callback_object: TransportCallbacks) -> None:\n self._closed = False\n self._process = process\n self._socket = socket\n self._reader = reader\n self._writer = writer\n self._stderr = stderr\n self._reader_thread = threading.Thread(target=self._read_loop, name='{}-reader'.format(name))\n self._writer_thread = threading.Thread(target=self._write_loop, name='{}-writer'.format(name))\n self._stderr_thread = threading.Thread(target=self._stderr_loop, name='{}-stderr'.format(name))\n self._callback_object = weakref.ref(callback_object)\n self._send_queue = Queue(0) # type: Queue[Optional[Dict[str, Any]]]\n self._reader_thread.start()\n self._writer_thread.start()\n self._stderr_thread.start()\n\n def send(self, payload: Dict[str, Any]) -> None:\n self._send_queue.put_nowait(payload)\n\n def close(self) -> None:\n if not self._closed:\n self._send_queue.put_nowait(None)\n if self._socket:\n self._socket.close()\n self._closed = True\n\n def _join_thread(self, t: threading.Thread) -> None:\n if t.ident == threading.current_thread().ident:\n return\n try:\n t.join(2)\n except TimeoutError as ex:\n exception_log(\"failed to join {} thread\".format(t.name), ex)\n\n def __del__(self) -> None:\n self.close()\n self._join_thread(self._writer_thread)\n self._join_thread(self._reader_thread)\n self._join_thread(self._stderr_thread)\n\n def _read_loop(self) -> None:\n try:\n while self._reader:\n line = self._reader.readline()\n if not line:\n break\n try:\n num_bytes = _content_length(line)\n except ValueError:\n continue\n if num_bytes is None:\n continue\n while line and line.strip():\n line = self._reader.readline()\n if not line:\n continue\n body = self._reader.read(num_bytes)\n callback_object = self._callback_object()\n if callback_object:\n try:\n 
callback_object.on_payload(_decode(body))\n except Exception as ex:\n exception_log(\"Error handling payload\", ex)\n else:\n break\n except (AttributeError, BrokenPipeError):\n pass\n except Exception as ex:\n exception_log(\"Unexpected exception\", ex)\n self._send_queue.put_nowait(None)\n\n def _end(self, exception: Optional[Exception]) -> None:\n exit_code = 0\n if not exception:\n try:\n # Allow the process to stop itself.\n exit_code = self._process.wait(1)\n except (AttributeError, ProcessLookupError, subprocess.TimeoutExpired):\n pass\n if self._process:\n try:\n # The process didn't stop itself. Terminate!\n self._process.kill()\n # still wait for the process to die, or zombie processes might be the result\n # Ignore the exit code in this case, it's going to be something non-zero because we sent SIGKILL.\n self._process.wait()\n except (AttributeError, ProcessLookupError):\n pass\n except Exception as ex:\n exception = ex # TODO: Old captured exception is overwritten\n callback_object = self._callback_object()\n if callback_object:\n callback_object.on_transport_close(exit_code, exception)\n\n def _write_loop(self) -> None:\n exception = None # type: Optional[Exception]\n try:\n while self._writer:\n d = self._send_queue.get()\n if d is None:\n break\n body = _encode(d)\n self._writer.writelines((\"Content-Length: {}\\r\\n\\r\\n\".format(len(body)).encode('ascii'), body))\n self._writer.flush()\n except (BrokenPipeError, AttributeError):\n pass\n except Exception as ex:\n exception = ex\n self._end(exception)\n\n def _stderr_loop(self) -> None:\n try:\n while self._stderr:\n if self._closed:\n break\n message = self._stderr.readline().decode('utf-8', 'replace').rstrip()\n callback_object = self._callback_object()\n if callback_object:\n callback_object.on_stderr_message(message)\n else:\n break\n except (BrokenPipeError, AttributeError):\n pass\n except Exception as ex:\n exception_log('unexpected exception type in stderr loop', ex)\n self._send_queue.put_nowait(None)\n\n\ndef create_transport(config: ClientConfig, cwd: Optional[str], window: sublime.Window,\n callback_object: TransportCallbacks, variables: Dict[str, str]) -> JsonRpcTransport:\n tcp_port = None # type: Optional[int]\n if config.tcp_port is not None:\n tcp_port = _find_free_port() if config.tcp_port == 0 else config.tcp_port\n if tcp_port is not None:\n variables[\"port\"] = str(tcp_port)\n args = sublime.expand_variables(config.binary_args, variables)\n args = [os.path.expanduser(arg) for arg in args]\n if tcp_port is not None:\n # DEPRECATED -- replace {port} with $port or ${port} in your client config\n args = [a.replace('{port}', str(tcp_port)) for a in args]\n env = os.environ.copy()\n for var, value in config.env.items():\n env[var] = sublime.expand_variables(value, variables)\n if tcp_port is not None:\n stdout = subprocess.DEVNULL\n stdin = subprocess.DEVNULL\n else:\n stdout = subprocess.PIPE\n stdin = subprocess.PIPE\n if sublime.platform() == \"windows\":\n startupinfo = subprocess.STARTUPINFO() # type: ignore\n startupinfo.dwFlags |= subprocess.SW_HIDE | subprocess.STARTF_USESHOWWINDOW # type: ignore\n executable_arg = args[0]\n fname, ext = os.path.splitext(executable_arg)\n if len(ext) < 1:\n path_to_executable = shutil.which(executable_arg)\n # what extensions should we append so CreateProcess can find it?\n # node has .cmd\n # dart has .bat\n # python has .exe wrappers - not needed\n for extension in ['.cmd', '.bat']:\n if path_to_executable and path_to_executable.lower().endswith(extension):\n 
args[0] = executable_arg + extension\n break\n else:\n startupinfo = None\n debug(\"starting {} in {}\".format(args, cwd if cwd else os.getcwd()))\n process = subprocess.Popen(\n args=args,\n stdin=stdin,\n stdout=stdout,\n stderr=subprocess.PIPE,\n startupinfo=startupinfo,\n env=env,\n cwd=cwd)\n _subprocesses.add(process)\n sock = None # type: Optional[socket.socket]\n if tcp_port:\n sock = _connect_tcp(tcp_port)\n if sock is None:\n raise RuntimeError(\"Failed to connect on port {}\".format(config.tcp_port))\n reader = sock.makefile('rwb') # type: IO[bytes]\n writer = reader\n else:\n reader = process.stdout # type: ignore\n writer = process.stdin # type: ignore\n return JsonRpcTransport(config.name, process, sock, reader, writer, process.stderr, callback_object)\n\n\n_subprocesses = weakref.WeakSet() # type: weakref.WeakSet[subprocess.Popen]\n\n\ndef kill_all_subprocesses() -> None:\n global _subprocesses\n subprocesses = list(_subprocesses)\n for p in subprocesses:\n try:\n p.kill()\n except Exception:\n pass\n for p in subprocesses:\n try:\n p.wait()\n except Exception:\n pass\n\n\ndef _connect_tcp(port: int) -> Optional[socket.socket]:\n start_time = time.time()\n while time.time() - start_time < TCP_CONNECT_TIMEOUT:\n try:\n return socket.create_connection(('localhost', port))\n except ConnectionRefusedError:\n pass\n return None\n\n\ndef _find_free_port() -> int:\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:\n s.bind(('', 0))\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return s.getsockname()[1]\n\n\ndef _encode(d: Dict[str, Any]) -> bytes:\n return json.dumps(d, sort_keys=False, check_circular=False, separators=(',', ':')).encode('utf-8')\n\n\ndef _decode(message: bytes) -> Dict[str, Any]:\n return json.loads(message.decode('utf-8'))\n\n\ndef _content_length(line: bytes) -> Optional[int]:\n if line.startswith(b'Content-Length: '):\n _, value = line.split(b'Content-Length: ')\n value = value.strip()\n try:\n return int(value)\n except ValueError as ex:\n raise ValueError(\"Invalid Content-Length header: {}\".format(value.decode('ascii'))) from ex\n return None\n", "path": "plugin/core/transports.py"}]}
3559
99
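One note on the record above: the pitfall it describes comes from `readline()` semantics. A blank stderr line and end-of-stream both collapse to an empty string once the result is decoded and stripped, but only the empty bytes object means the pipe has actually closed. A self-contained sketch using an in-memory stream with made-up byte contents:

```python
import io

# Simulate a server that writes one warning followed by a blank stderr line.
stderr = io.BytesIO(b"warning: something happened\n\n")

first = stderr.readline()    # b'warning: something happened\n'
second = stderr.readline()   # b'\n'  -> a blank line; the stream is still open
third = stderr.readline()    # b''   -> the real end-of-stream marker

print(first.decode().rstrip())          # 'warning: something happened'
print(second.decode().rstrip() == "")   # True, yet the server is still running
print(third == b"")                     # True; only this means the pipe closed
```

Any loop that treats the falsiness of the stripped string as an exit condition will tear the transport down on the first blank line, which is the behaviour the issue reports.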
gh_patches_debug_379
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-3650
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Non-executable files with shebangs in the repository **Describe your environment** (Nothing relevant to describe) **Steps to reproduce** ``` $ rg -l '^#!' | xargs ls -l -rwxr-xr-x. 1 ben ben 1420 Jul 5 2023 docs/examples/django/manage.py -rw-r--r--. 1 ben ben 1300 Jul 5 2023 docs/examples/opencensus-exporter-tracer/collector.py -rwxr-xr-x. 1 ben ben 1485 Jul 5 2023 docs/examples/opentracing/main.py -rwxr-xr-x. 1 ben ben 853 Jul 13 2023 scripts/build.sh -rwxr-xr-x. 1 ben ben 1163 Jan 22 10:06 scripts/coverage.sh -rwxr-xr-x. 1 ben ben 20741 Jul 13 2023 scripts/eachdist.py -rwxr-xr-x. 1 ben ben 215 Jul 5 2023 scripts/generate_website_docs.sh -rwxr-xr-x. 1 ben ben 2377 Jan 22 10:06 scripts/proto_codegen.sh -rwxr-xr-x. 1 ben ben 1928 Jan 22 10:06 scripts/semconv/generate.sh -rwxr-xr-x. 1 ben ben 945 Jul 5 2023 scripts/tracecontext-integration-test.sh -rw-r--r--. 1 ben ben 2519 Jan 22 11:43 tests/w3c_tracecontext_validation_server.py ``` Note that two files have shebang lines (`#!`) but do not have the executable bit set, which makes the shebang lines useless. **What is the expected behavior?** Files should either be non-executable and have no shebang line, or be executable and have a shebang line. **What is the actual behavior?** The following files are not executable and have useless shebang lines: - `docs/examples/opencensus-exporter-tracer/collector.py` - `tests/w3c_tracecontext_validation_server.py` **Additional context** This is a trivial thing, but I would like to fix it in a PR – either by setting the executable bit on these two files, or by removing the useless shebang lines. Both files are “script-like,” i.e. they have `if __name__ == "__main__"` or have useful side effects. Which approach would you prefer? </issue> <code> [start of docs/examples/opencensus-exporter-tracer/collector.py] 1 #!/usr/bin/env python3 2 # 3 # Copyright The OpenTelemetry Authors 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 17 from opentelemetry import trace 18 from opentelemetry.exporter.opencensus.trace_exporter import ( 19 OpenCensusSpanExporter, 20 ) 21 from opentelemetry.sdk.trace import TracerProvider 22 from opentelemetry.sdk.trace.export import BatchSpanProcessor 23 24 exporter = OpenCensusSpanExporter(endpoint="localhost:55678") 25 26 trace.set_tracer_provider(TracerProvider()) 27 tracer = trace.get_tracer(__name__) 28 span_processor = BatchSpanProcessor(exporter) 29 30 trace.get_tracer_provider().add_span_processor(span_processor) 31 with tracer.start_as_current_span("foo"): 32 with tracer.start_as_current_span("bar"): 33 with tracer.start_as_current_span("baz"): 34 print("Hello world from OpenTelemetry Python!") 35 [end of docs/examples/opencensus-exporter-tracer/collector.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/examples/opencensus-exporter-tracer/collector.py b/docs/examples/opencensus-exporter-tracer/collector.py --- a/docs/examples/opencensus-exporter-tracer/collector.py +++ b/docs/examples/opencensus-exporter-tracer/collector.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License");
{"golden_diff": "diff --git a/docs/examples/opencensus-exporter-tracer/collector.py b/docs/examples/opencensus-exporter-tracer/collector.py\n--- a/docs/examples/opencensus-exporter-tracer/collector.py\n+++ b/docs/examples/opencensus-exporter-tracer/collector.py\n@@ -1,5 +1,3 @@\n-#!/usr/bin/env python3\n-#\n # Copyright The OpenTelemetry Authors\n #\n # Licensed under the Apache License, Version 2.0 (the \"License\");\n", "issue": "Non-executable files with shebangs in the repository\n**Describe your environment**\r\n\r\n(Nothing relevant to describe)\r\n\r\n**Steps to reproduce**\r\n\r\n```\r\n$ rg -l '^#!' | xargs ls -l\r\n-rwxr-xr-x. 1 ben ben 1420 Jul 5 2023 docs/examples/django/manage.py\r\n-rw-r--r--. 1 ben ben 1300 Jul 5 2023 docs/examples/opencensus-exporter-tracer/collector.py\r\n-rwxr-xr-x. 1 ben ben 1485 Jul 5 2023 docs/examples/opentracing/main.py\r\n-rwxr-xr-x. 1 ben ben 853 Jul 13 2023 scripts/build.sh\r\n-rwxr-xr-x. 1 ben ben 1163 Jan 22 10:06 scripts/coverage.sh\r\n-rwxr-xr-x. 1 ben ben 20741 Jul 13 2023 scripts/eachdist.py\r\n-rwxr-xr-x. 1 ben ben 215 Jul 5 2023 scripts/generate_website_docs.sh\r\n-rwxr-xr-x. 1 ben ben 2377 Jan 22 10:06 scripts/proto_codegen.sh\r\n-rwxr-xr-x. 1 ben ben 1928 Jan 22 10:06 scripts/semconv/generate.sh\r\n-rwxr-xr-x. 1 ben ben 945 Jul 5 2023 scripts/tracecontext-integration-test.sh\r\n-rw-r--r--. 1 ben ben 2519 Jan 22 11:43 tests/w3c_tracecontext_validation_server.py\r\n```\r\n\r\nNote that two files have shebang lines (`#!`) but do not have the executable bit set, which makes the shebang lines useless.\r\n\r\n**What is the expected behavior?**\r\n\r\nFiles should either be non-executable and have no shebang line, or be executable and have a shebang line.\r\n\r\n**What is the actual behavior?**\r\n\r\nThe following files are not executable and have useless shebang lines:\r\n\r\n- `docs/examples/opencensus-exporter-tracer/collector.py`\r\n- `tests/w3c_tracecontext_validation_server.py`\r\n\r\n**Additional context**\r\n\r\nThis is a trivial thing, but I would like to fix it in a PR \u2013 either by setting the executable bit on these two files, or by removing the useless shebang lines. Both files are \u201cscript-like,\u201d i.e. they have `if __name__ == \"__main__\"` or have useful side effects. 
Which approach would you prefer?\n", "before_files": [{"content": "#!/usr/bin/env python3\n#\n# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom opentelemetry import trace\nfrom opentelemetry.exporter.opencensus.trace_exporter import (\n OpenCensusSpanExporter,\n)\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor\n\nexporter = OpenCensusSpanExporter(endpoint=\"localhost:55678\")\n\ntrace.set_tracer_provider(TracerProvider())\ntracer = trace.get_tracer(__name__)\nspan_processor = BatchSpanProcessor(exporter)\n\ntrace.get_tracer_provider().add_span_processor(span_processor)\nwith tracer.start_as_current_span(\"foo\"):\n with tracer.start_as_current_span(\"bar\"):\n with tracer.start_as_current_span(\"baz\"):\n print(\"Hello world from OpenTelemetry Python!\")\n", "path": "docs/examples/opencensus-exporter-tracer/collector.py"}]}
1512
106
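Regarding the record above: whichever way it is resolved (set the executable bit or drop the shebang), the invariant is that the two should agree. A hypothetical audit helper, not part of the repository, sketches how to find files where they disagree using only the standard library:

```python
import os
import stat

def shebang_exec_mismatch(path: str) -> bool:
    """Return True when a file's shebang line and its executable bit disagree."""
    with open(path, "rb") as f:
        has_shebang = f.read(2) == b"#!"
    is_executable = bool(os.stat(path).st_mode & stat.S_IXUSR)
    return has_shebang != is_executable

# Walk the tree and flag every .py/.sh file whose mode contradicts its first line.
for root, _dirs, files in os.walk("."):
    for name in files:
        if name.endswith((".py", ".sh")):
            path = os.path.join(root, name)
            if shebang_exec_mismatch(path):
                print("mismatch:", path)
```

Run from the repository root, a check along these lines would flag the two files called out in the issue.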
gh_patches_debug_33894
rasdani/github-patches
git_diff
google__TensorNetwork-377
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Enable contractor contraction on disconnected graphs When we are using tensor network to represent a tensor in the quantum physics, there is no rule that the network must be connected. for example, we can represent an exterior product from two non-connected nodes. </issue> <code> [start of tensornetwork/contractors/opt_einsum_paths/path_contractors.py] 1 # pylint: disable=cyclic-import 2 # Copyright 2019 The TensorNetwork Authors 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 """Contractors based on `opt_einsum`'s path algorithms.""" 16 17 import functools 18 import opt_einsum 19 # pylint: disable=line-too-long 20 from tensornetwork.network_operations import check_connected, get_all_edges, get_subgraph_dangling 21 # pylint: disable=line-too-long 22 from tensornetwork.network_components import get_all_nondangling, contract_parallel 23 from tensornetwork.network_components import Edge, BaseNode 24 from tensornetwork.contractors.opt_einsum_paths import utils 25 from typing import Any, Optional, Sequence, Iterable 26 27 #TODO (martin): add return types of functions back once TensorNetwork is gone 28 # remove _base_network 29 # _base_nodes -> base 30 31 32 def base(nodes: Iterable[BaseNode], 33 algorithm: utils.Algorithm, 34 output_edge_order: Optional[Sequence[Edge]] = None, 35 ignore_edge_order: bool = False) -> BaseNode: 36 """Base method for all `opt_einsum` contractors. 37 38 Args: 39 nodes: A collection of connected nodes. 40 algorithm: `opt_einsum` contraction method to use. 41 output_edge_order: An optional list of edges. Edges of the 42 final node in `nodes_set` 43 are reordered into `output_edge_order`; 44 if final node has more than one edge, 45 `output_edge_order` must be pronvided. 46 ignore_edge_order: An option to ignore the output edge 47 order. 48 49 Returns: 50 Final node after full contraction. 51 """ 52 nodes_set = set(nodes) 53 check_connected(nodes_set) 54 edges = get_all_edges(nodes_set) 55 #output edge order has to be determinded before any contraction 56 #(edges are refreshed after contractions) 57 58 if not ignore_edge_order: 59 if output_edge_order is None: 60 output_edge_order = list(get_subgraph_dangling(nodes)) 61 if len(output_edge_order) > 1: 62 raise ValueError("The final node after contraction has more than " 63 "one remaining edge. In this case `output_edge_order` " 64 "has to be provided.") 65 66 if set(output_edge_order) != get_subgraph_dangling(nodes): 67 raise ValueError( 68 "output edges are not equal to the remaining " 69 "non-contracted edges of the final node." 70 ) 71 72 for edge in edges: 73 if not edge.is_disabled: #if its disabled we already contracted it 74 if edge.is_trace(): 75 nodes_set.remove(edge.node1) 76 nodes_set.add(contract_parallel(edge)) 77 78 if len(nodes_set) == 1: 79 # There's nothing to contract. 
80 if ignore_edge_order: 81 return list(nodes_set)[0] 82 return list(nodes_set)[0].reorder_edges(output_edge_order) 83 84 # Then apply `opt_einsum`'s algorithm 85 path, nodes = utils.get_path(nodes_set, algorithm) 86 for a, b in path: 87 new_node = nodes[a] @ nodes[b] 88 nodes.append(new_node) 89 nodes = utils.multi_remove(nodes, [a, b]) 90 91 # if the final node has more than one edge, 92 # output_edge_order has to be specified 93 final_node = nodes[0] # nodes were connected, we checked this 94 if not ignore_edge_order: 95 final_node.reorder_edges(output_edge_order) 96 return final_node 97 98 99 def optimal( 100 nodes: Iterable[BaseNode], 101 output_edge_order: Optional[Sequence[Edge]] = None, 102 memory_limit: Optional[int] = None, 103 ignore_edge_order: bool = False) -> BaseNode: 104 """Optimal contraction order via `opt_einsum`. 105 106 This method will find the truly optimal contraction order via 107 `opt_einsum`'s depth first search algorithm. Since this search is 108 exhaustive, if your network is large (n>10), then the search may 109 take longer than just contracting in a suboptimal way. 110 111 Args: 112 nodes: an iterable of Nodes 113 output_edge_order: An optional list of edges. 114 Edges of the final node in `nodes_set` 115 are reordered into `output_edge_order`; 116 if final node has more than one edge, 117 `output_edge_order` must be provided. 118 memory_limit: Maximum number of elements in an array during contractions. 119 ignore_edge_order: An option to ignore the output edge order. 120 121 Returns: 122 The final node after full contraction. 123 """ 124 alg = functools.partial(opt_einsum.paths.optimal, memory_limit=memory_limit) 125 return base(nodes, alg, output_edge_order, ignore_edge_order) 126 127 128 def branch(nodes: Iterable[BaseNode], 129 output_edge_order: Optional[Sequence[Edge]] = None, 130 memory_limit: Optional[int] = None, 131 nbranch: Optional[int] = None, 132 ignore_edge_order: bool = False) -> BaseNode: 133 """Branch contraction path via `opt_einsum`. 134 135 This method uses the DFS approach of `optimal` while sorting potential 136 contractions based on a heuristic cost, in order to reduce time spent 137 in exploring paths which are unlikely to be optimal. 138 For more details: 139 https://optimized-einsum.readthedocs.io/en/latest/branching_path.html 140 141 Args: 142 nodes: an iterable of Nodes 143 output_edge_order: An optional list of edges. 144 Edges of the final node in `nodes_set` 145 are reordered into `output_edge_order`; 146 if final node has more than one edge, 147 `output_edge_order` must be provided. 148 memory_limit: Maximum number of elements in an array during contractions. 149 nbranch: Number of best contractions to explore. 150 If None it explores all inner products starting with those that 151 have the best cost heuristic. 152 ignore_edge_order: An option to ignore the output edge order. 153 154 Returns: 155 The final node after full contraction. 156 """ 157 alg = functools.partial( 158 opt_einsum.paths.branch, memory_limit=memory_limit, nbranch=nbranch) 159 return base(nodes, alg, output_edge_order, ignore_edge_order) 160 161 162 def greedy( 163 nodes: Iterable[BaseNode], 164 output_edge_order: Optional[Sequence[Edge]] = None, 165 memory_limit: Optional[int] = None, 166 ignore_edge_order: bool = False) -> BaseNode: 167 """Greedy contraction path via `opt_einsum`. 168 169 This provides a more efficient strategy than `optimal` for finding 170 contraction paths in large networks. 
First contracts pairs of tensors 171 by finding the pair with the lowest cost at each step. Then it performs 172 the outer products. 173 For more details: 174 https://optimized-einsum.readthedocs.io/en/latest/greedy_path.html 175 176 Args: 177 nodes: an iterable of Nodes 178 output_edge_order: An optional list of edges. 179 Edges of the final node in `nodes_set` 180 are reordered into `output_edge_order`; 181 if final node has more than one edge, 182 `output_edge_order` must be provided. 183 memory_limit: Maximum number of elements in an array during contractions. 184 ignore_edge_order: An option to ignore the output edge order. 185 186 Returns: 187 The final node after full contraction. 188 """ 189 alg = functools.partial(opt_einsum.paths.greedy, memory_limit=memory_limit) 190 return base(nodes, alg, output_edge_order, ignore_edge_order) 191 192 193 # pylint: disable=too-many-return-statements 194 def auto( 195 nodes: BaseNode, 196 output_edge_order: Optional[Sequence[Edge]] = None, 197 memory_limit: Optional[int] = None, 198 ignore_edge_order: bool = False) -> BaseNode: 199 """Chooses one of the above algorithms according to network size. 200 201 Default behavior is based on `opt_einsum`'s `auto` contractor. 202 203 Args: 204 nodes: A collection of connected nodes. 205 output_edge_order: An optional list of edges. 206 Edges of the final node in `nodes_set` 207 are reordered into `output_edge_order`; 208 if final node has more than one edge, 209 `output_edge_order` must be provided. 210 memory_limit: Maximum number of elements in an array during contractions. 211 ignore_edge_order: An option to ignore the output edge order. 212 213 Returns: 214 Final node after full contraction. 215 """ 216 217 n = len(list(nodes)) #pytype thing 218 _nodes = nodes 219 if n <= 0: 220 raise ValueError("Cannot contract empty tensor network.") 221 if n == 1: 222 if not ignore_edge_order: 223 if output_edge_order is None: 224 output_edge_order = list( 225 (get_all_edges(_nodes) - get_all_nondangling(_nodes))) 226 if len(output_edge_order) > 1: 227 raise ValueError("The final node after contraction has more than " 228 "one dangling edge. In this case `output_edge_order` " 229 "has to be provided.") 230 231 edges = get_all_nondangling(_nodes) 232 if edges: 233 final_node = contract_parallel(edges.pop()) 234 else: 235 final_node = list(_nodes)[0] 236 final_node.reorder_edges(output_edge_order) 237 if not ignore_edge_order: 238 final_node.reorder_edges(output_edge_order) 239 return final_node 240 241 if n < 5: 242 return optimal(nodes, output_edge_order, memory_limit, ignore_edge_order) 243 if n < 7: 244 return branch(nodes, output_edge_order, memory_limit, ignore_edge_order) 245 if n < 9: 246 return branch(nodes, output_edge_order, memory_limit, nbranch=2, ignore_edge_order=ignore_edge_order) 247 if n < 15: 248 return branch(nodes, output_edge_order, nbranch=1, ignore_edge_order=ignore_edge_order) 249 return greedy(nodes, output_edge_order, memory_limit, ignore_edge_order) 250 251 252 def custom( 253 nodes: Iterable[BaseNode], 254 optimizer: Any, 255 output_edge_order: Sequence[Edge] = None, 256 memory_limit: Optional[int] = None, 257 ignore_edge_order: bool = False) -> BaseNode: 258 """Uses a custom path optimizer created by the user to calculate paths. 259 260 The custom path optimizer should inherit `opt_einsum`'s `PathOptimizer`. 
261 For more details: 262 https://optimized-einsum.readthedocs.io/en/latest/custom_paths.html 263 264 Args: 265 nodes: an iterable of Nodes 266 output_edge_order: An optional list of edges. 267 Edges of the final node in `nodes_set` 268 are reordered into `output_edge_order`; 269 if final node has more than one edge, 270 output_edge_order` must be provided. 271 optimizer: A custom `opt_einsum.PathOptimizer` object. 272 memory_limit: Maximum number of elements in an array during contractions. 273 ignore_edge_order: An option to ignore the output edge order. 274 275 Returns: 276 Final node after full contraction. 277 """ 278 alg = functools.partial(optimizer, memory_limit=memory_limit) 279 return base(nodes, alg, output_edge_order, ignore_edge_order) 280 [end of tensornetwork/contractors/opt_einsum_paths/path_contractors.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tensornetwork/contractors/opt_einsum_paths/path_contractors.py b/tensornetwork/contractors/opt_einsum_paths/path_contractors.py --- a/tensornetwork/contractors/opt_einsum_paths/path_contractors.py +++ b/tensornetwork/contractors/opt_einsum_paths/path_contractors.py @@ -19,7 +19,7 @@ # pylint: disable=line-too-long from tensornetwork.network_operations import check_connected, get_all_edges, get_subgraph_dangling # pylint: disable=line-too-long -from tensornetwork.network_components import get_all_nondangling, contract_parallel +from tensornetwork.network_components import get_all_nondangling, contract_parallel, contract_between from tensornetwork.network_components import Edge, BaseNode from tensornetwork.contractors.opt_einsum_paths import utils from typing import Any, Optional, Sequence, Iterable @@ -50,7 +50,6 @@ Final node after full contraction. """ nodes_set = set(nodes) - check_connected(nodes_set) edges = get_all_edges(nodes_set) #output edge order has to be determinded before any contraction #(edges are refreshed after contractions) @@ -84,7 +83,7 @@ # Then apply `opt_einsum`'s algorithm path, nodes = utils.get_path(nodes_set, algorithm) for a, b in path: - new_node = nodes[a] @ nodes[b] + new_node = contract_between(nodes[a], nodes[b], allow_outer_product=True) nodes.append(new_node) nodes = utils.multi_remove(nodes, [a, b]) @@ -192,7 +191,7 @@ # pylint: disable=too-many-return-statements def auto( - nodes: BaseNode, + nodes: Iterable[BaseNode], output_edge_order: Optional[Sequence[Edge]] = None, memory_limit: Optional[int] = None, ignore_edge_order: bool = False) -> BaseNode:
{"golden_diff": "diff --git a/tensornetwork/contractors/opt_einsum_paths/path_contractors.py b/tensornetwork/contractors/opt_einsum_paths/path_contractors.py\n--- a/tensornetwork/contractors/opt_einsum_paths/path_contractors.py\n+++ b/tensornetwork/contractors/opt_einsum_paths/path_contractors.py\n@@ -19,7 +19,7 @@\n # pylint: disable=line-too-long\n from tensornetwork.network_operations import check_connected, get_all_edges, get_subgraph_dangling\n # pylint: disable=line-too-long\n-from tensornetwork.network_components import get_all_nondangling, contract_parallel\n+from tensornetwork.network_components import get_all_nondangling, contract_parallel, contract_between\n from tensornetwork.network_components import Edge, BaseNode\n from tensornetwork.contractors.opt_einsum_paths import utils\n from typing import Any, Optional, Sequence, Iterable\n@@ -50,7 +50,6 @@\n Final node after full contraction.\n \"\"\"\n nodes_set = set(nodes)\n- check_connected(nodes_set)\n edges = get_all_edges(nodes_set)\n #output edge order has to be determinded before any contraction\n #(edges are refreshed after contractions)\n@@ -84,7 +83,7 @@\n # Then apply `opt_einsum`'s algorithm\n path, nodes = utils.get_path(nodes_set, algorithm)\n for a, b in path:\n- new_node = nodes[a] @ nodes[b]\n+ new_node = contract_between(nodes[a], nodes[b], allow_outer_product=True)\n nodes.append(new_node)\n nodes = utils.multi_remove(nodes, [a, b])\n \n@@ -192,7 +191,7 @@\n \n # pylint: disable=too-many-return-statements\n def auto(\n- nodes: BaseNode,\n+ nodes: Iterable[BaseNode],\n output_edge_order: Optional[Sequence[Edge]] = None,\n memory_limit: Optional[int] = None,\n ignore_edge_order: bool = False) -> BaseNode:\n", "issue": "Enable contractor contraction on disconnected graphs\nWhen we are using tensor network to represent a tensor in the quantum physics, there is no rule that the network must be connected. 
for example, we can represent an exterior product from two non-connected nodes.\n", "before_files": [{"content": "# pylint: disable=cyclic-import\n# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Contractors based on `opt_einsum`'s path algorithms.\"\"\"\n\nimport functools\nimport opt_einsum\n# pylint: disable=line-too-long\nfrom tensornetwork.network_operations import check_connected, get_all_edges, get_subgraph_dangling\n# pylint: disable=line-too-long\nfrom tensornetwork.network_components import get_all_nondangling, contract_parallel\nfrom tensornetwork.network_components import Edge, BaseNode\nfrom tensornetwork.contractors.opt_einsum_paths import utils\nfrom typing import Any, Optional, Sequence, Iterable\n\n#TODO (martin): add return types of functions back once TensorNetwork is gone\n# remove _base_network\n# _base_nodes -> base\n\n\ndef base(nodes: Iterable[BaseNode],\n algorithm: utils.Algorithm,\n output_edge_order: Optional[Sequence[Edge]] = None,\n ignore_edge_order: bool = False) -> BaseNode:\n \"\"\"Base method for all `opt_einsum` contractors.\n\n Args:\n nodes: A collection of connected nodes.\n algorithm: `opt_einsum` contraction method to use.\n output_edge_order: An optional list of edges. Edges of the\n final node in `nodes_set`\n are reordered into `output_edge_order`;\n if final node has more than one edge,\n `output_edge_order` must be pronvided.\n ignore_edge_order: An option to ignore the output edge\n order.\n\n Returns:\n Final node after full contraction.\n \"\"\"\n nodes_set = set(nodes)\n check_connected(nodes_set)\n edges = get_all_edges(nodes_set)\n #output edge order has to be determinded before any contraction\n #(edges are refreshed after contractions)\n\n if not ignore_edge_order:\n if output_edge_order is None:\n output_edge_order = list(get_subgraph_dangling(nodes))\n if len(output_edge_order) > 1:\n raise ValueError(\"The final node after contraction has more than \"\n \"one remaining edge. 
In this case `output_edge_order` \"\n \"has to be provided.\")\n\n if set(output_edge_order) != get_subgraph_dangling(nodes):\n raise ValueError(\n \"output edges are not equal to the remaining \"\n \"non-contracted edges of the final node.\"\n )\n\n for edge in edges:\n if not edge.is_disabled: #if its disabled we already contracted it\n if edge.is_trace():\n nodes_set.remove(edge.node1)\n nodes_set.add(contract_parallel(edge))\n\n if len(nodes_set) == 1:\n # There's nothing to contract.\n if ignore_edge_order:\n return list(nodes_set)[0]\n return list(nodes_set)[0].reorder_edges(output_edge_order)\n\n # Then apply `opt_einsum`'s algorithm\n path, nodes = utils.get_path(nodes_set, algorithm)\n for a, b in path:\n new_node = nodes[a] @ nodes[b]\n nodes.append(new_node)\n nodes = utils.multi_remove(nodes, [a, b])\n\n # if the final node has more than one edge,\n # output_edge_order has to be specified\n final_node = nodes[0] # nodes were connected, we checked this\n if not ignore_edge_order:\n final_node.reorder_edges(output_edge_order)\n return final_node\n\n\ndef optimal(\n nodes: Iterable[BaseNode],\n output_edge_order: Optional[Sequence[Edge]] = None,\n memory_limit: Optional[int] = None,\n ignore_edge_order: bool = False) -> BaseNode:\n \"\"\"Optimal contraction order via `opt_einsum`.\n\n This method will find the truly optimal contraction order via\n `opt_einsum`'s depth first search algorithm. Since this search is\n exhaustive, if your network is large (n>10), then the search may\n take longer than just contracting in a suboptimal way.\n\n Args:\n nodes: an iterable of Nodes\n output_edge_order: An optional list of edges.\n Edges of the final node in `nodes_set`\n are reordered into `output_edge_order`;\n if final node has more than one edge,\n `output_edge_order` must be provided.\n memory_limit: Maximum number of elements in an array during contractions.\n ignore_edge_order: An option to ignore the output edge order.\n\n Returns:\n The final node after full contraction.\n \"\"\"\n alg = functools.partial(opt_einsum.paths.optimal, memory_limit=memory_limit)\n return base(nodes, alg, output_edge_order, ignore_edge_order)\n\n\ndef branch(nodes: Iterable[BaseNode],\n output_edge_order: Optional[Sequence[Edge]] = None,\n memory_limit: Optional[int] = None,\n nbranch: Optional[int] = None,\n ignore_edge_order: bool = False) -> BaseNode:\n \"\"\"Branch contraction path via `opt_einsum`.\n\n This method uses the DFS approach of `optimal` while sorting potential\n contractions based on a heuristic cost, in order to reduce time spent\n in exploring paths which are unlikely to be optimal.\n For more details:\n https://optimized-einsum.readthedocs.io/en/latest/branching_path.html\n\n Args:\n nodes: an iterable of Nodes\n output_edge_order: An optional list of edges.\n Edges of the final node in `nodes_set`\n are reordered into `output_edge_order`;\n if final node has more than one edge,\n `output_edge_order` must be provided.\n memory_limit: Maximum number of elements in an array during contractions.\n nbranch: Number of best contractions to explore.\n If None it explores all inner products starting with those that\n have the best cost heuristic.\n ignore_edge_order: An option to ignore the output edge order.\n\n Returns:\n The final node after full contraction.\n \"\"\"\n alg = functools.partial(\n opt_einsum.paths.branch, memory_limit=memory_limit, nbranch=nbranch)\n return base(nodes, alg, output_edge_order, ignore_edge_order)\n\n\ndef greedy(\n nodes: Iterable[BaseNode],\n 
output_edge_order: Optional[Sequence[Edge]] = None,\n memory_limit: Optional[int] = None,\n ignore_edge_order: bool = False) -> BaseNode:\n \"\"\"Greedy contraction path via `opt_einsum`.\n\n This provides a more efficient strategy than `optimal` for finding\n contraction paths in large networks. First contracts pairs of tensors\n by finding the pair with the lowest cost at each step. Then it performs\n the outer products.\n For more details:\n https://optimized-einsum.readthedocs.io/en/latest/greedy_path.html\n\n Args:\n nodes: an iterable of Nodes\n output_edge_order: An optional list of edges.\n Edges of the final node in `nodes_set`\n are reordered into `output_edge_order`;\n if final node has more than one edge,\n `output_edge_order` must be provided.\n memory_limit: Maximum number of elements in an array during contractions.\n ignore_edge_order: An option to ignore the output edge order.\n\n Returns:\n The final node after full contraction.\n \"\"\"\n alg = functools.partial(opt_einsum.paths.greedy, memory_limit=memory_limit)\n return base(nodes, alg, output_edge_order, ignore_edge_order)\n\n\n# pylint: disable=too-many-return-statements\ndef auto(\n nodes: BaseNode,\n output_edge_order: Optional[Sequence[Edge]] = None,\n memory_limit: Optional[int] = None,\n ignore_edge_order: bool = False) -> BaseNode:\n \"\"\"Chooses one of the above algorithms according to network size.\n\n Default behavior is based on `opt_einsum`'s `auto` contractor.\n\n Args:\n nodes: A collection of connected nodes.\n output_edge_order: An optional list of edges.\n Edges of the final node in `nodes_set`\n are reordered into `output_edge_order`;\n if final node has more than one edge,\n `output_edge_order` must be provided.\n memory_limit: Maximum number of elements in an array during contractions.\n ignore_edge_order: An option to ignore the output edge order.\n\n Returns:\n Final node after full contraction.\n \"\"\"\n\n n = len(list(nodes)) #pytype thing\n _nodes = nodes\n if n <= 0:\n raise ValueError(\"Cannot contract empty tensor network.\")\n if n == 1:\n if not ignore_edge_order:\n if output_edge_order is None:\n output_edge_order = list(\n (get_all_edges(_nodes) - get_all_nondangling(_nodes)))\n if len(output_edge_order) > 1:\n raise ValueError(\"The final node after contraction has more than \"\n \"one dangling edge. 
In this case `output_edge_order` \"\n \"has to be provided.\")\n\n edges = get_all_nondangling(_nodes)\n if edges:\n final_node = contract_parallel(edges.pop())\n else:\n final_node = list(_nodes)[0]\n final_node.reorder_edges(output_edge_order)\n if not ignore_edge_order:\n final_node.reorder_edges(output_edge_order)\n return final_node\n\n if n < 5:\n return optimal(nodes, output_edge_order, memory_limit, ignore_edge_order)\n if n < 7:\n return branch(nodes, output_edge_order, memory_limit, ignore_edge_order)\n if n < 9:\n return branch(nodes, output_edge_order, memory_limit, nbranch=2, ignore_edge_order=ignore_edge_order)\n if n < 15:\n return branch(nodes, output_edge_order, nbranch=1, ignore_edge_order=ignore_edge_order)\n return greedy(nodes, output_edge_order, memory_limit, ignore_edge_order)\n\n\ndef custom(\n nodes: Iterable[BaseNode],\n optimizer: Any,\n output_edge_order: Sequence[Edge] = None,\n memory_limit: Optional[int] = None,\n ignore_edge_order: bool = False) -> BaseNode:\n \"\"\"Uses a custom path optimizer created by the user to calculate paths.\n\n The custom path optimizer should inherit `opt_einsum`'s `PathOptimizer`.\n For more details:\n https://optimized-einsum.readthedocs.io/en/latest/custom_paths.html\n\n Args:\n nodes: an iterable of Nodes\n output_edge_order: An optional list of edges.\n Edges of the final node in `nodes_set`\n are reordered into `output_edge_order`;\n if final node has more than one edge,\n output_edge_order` must be provided.\n optimizer: A custom `opt_einsum.PathOptimizer` object.\n memory_limit: Maximum number of elements in an array during contractions.\n ignore_edge_order: An option to ignore the output edge order.\n\n Returns:\n Final node after full contraction.\n \"\"\"\n alg = functools.partial(optimizer, memory_limit=memory_limit)\n return base(nodes, alg, output_edge_order, ignore_edge_order)\n", "path": "tensornetwork/contractors/opt_einsum_paths/path_contractors.py"}]}
num_tokens_prompt: 3,828
num_tokens_diff: 444
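The record above ends with the docstrings for the `optimal`, `branch`, `greedy` and `auto` path contractors. A minimal usage sketch of that API — assuming the released `tensornetwork` package, where these helpers are reachable as `tn.contractors.*` — is shown below; the shapes and the choice of `auto` are only illustrative:

```python
import numpy as np
import tensornetwork as tn

# Build two nodes and connect their shared dimension.
a = tn.Node(np.random.rand(2, 3))
b = tn.Node(np.random.rand(3, 4))
_ = a[1] ^ b[0]  # the "^" operator creates the edge to be contracted

# `auto` picks optimal/branch/greedy based on network size, as described above.
result = tn.contractors.auto([a, b], output_edge_order=[a[0], b[1]])
print(result.tensor.shape)  # (2, 4)
```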
problem_id: gh_patches_debug_12207
source: rasdani/github-patches
task_type: git_diff
in_source_id: ESMCI__cime-249
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Misleading error message in case_setup.py I suggest changing "%s NINST value %d greater than %s NTASKS %d" (e.g ERROR: ATM NINST value 1 greater than ATM NTASKS 0) to "NINST_%s value %d greater than NTASKS_%s %d (e.g. ERROR: NINST_ATM value 1 greater than NTASKS_ATM 0) to reflect the real variable name which can be queried or changed with xmlquery/xmlchange </issue> <code> [start of utils/python/CIME/case_setup.py] 1 """ 2 Library for case.setup. 3 """ 4 5 from CIME.XML.standard_module_setup import * 6 7 from CIME.check_lockedfiles import check_lockedfiles 8 from CIME.preview_namelists import preview_namelists 9 from CIME.XML.env_mach_pes import EnvMachPes 10 from CIME.XML.component import Component 11 from CIME.XML.compilers import Compilers 12 from CIME.utils import expect, run_cmd, append_status 13 14 import shutil, time, glob 15 16 logger = logging.getLogger(__name__) 17 18 ############################################################################### 19 def _check_pelayouts_require_rebuild(case, models): 20 ############################################################################### 21 """ 22 Create if we require a rebuild, expects cwd is caseroot 23 """ 24 locked_pes = "LockedFiles/env_mach_pes.xml" 25 if os.path.exists(locked_pes): 26 # Look to see if $comp_PE_CHANGE_REQUIRES_REBUILD is defined 27 # for any component 28 env_mach_pes_locked = EnvMachPes(infile=locked_pes) 29 for comp in models: 30 if case.get_value("%s_PE_CHANGE_REQUIRES_REBUILD" % comp): 31 # Changing these values in env_mach_pes.xml will force 32 # you to clean the corresponding component 33 old_tasks = env_mach_pes_locked.get_value("NTASKS_%s" % comp) 34 old_threads = env_mach_pes_locked.get_value("NTHRDS_%s" % comp) 35 old_inst = env_mach_pes_locked.get_value("NINST_%s" % comp) 36 37 new_tasks = case.get_value("NTASKS_%s" % comp) 38 new_threads = case.get_value("NTHRDS_%s" % comp) 39 new_inst = case.get_value("NINST_%s" % comp) 40 41 if old_tasks != new_tasks or old_threads != new_threads or old_inst != new_inst: 42 logger.warn("%s pe change requires clean build" % comp) 43 cleanflag = comp.lower() 44 run_cmd("./case.build --clean %s" % cleanflag) 45 46 os.remove(locked_pes) 47 48 ############################################################################### 49 def _build_usernl_files(case, model, comp): 50 ############################################################################### 51 """ 52 Create user_nl_xxx files, expects cwd is caseroot 53 """ 54 model = model.upper() 55 model_file = case.get_value("CONFIG_%s_FILE" % model) 56 model_dir = os.path.dirname(model_file) 57 58 expect(os.path.isdir(model_dir), 59 "cannot find cime_config directory %s for component %s" % (model_dir, comp)) 60 61 if comp == "cpl": 62 if not os.path.exists("user_nl_cpl"): 63 shutil.copy(os.path.join(model_dir, "user_nl_cpl"), ".") 64 else: 65 ninst = case.get_value("NINST_%s" % model) 66 nlfile = "user_nl_%s" % comp 67 model_nl = os.path.join(model_dir, nlfile) 68 if os.path.exists(model_nl): 69 if ninst > 1: 70 for inst_counter in xrange(1, ninst+1): 71 case_nlfile = "%s_%04d" % (nlfile, inst_counter) 72 if not os.path.exists(case_nlfile): 73 shutil.copy(model_nl, case_nlfile) 74 else: 75 if not os.path.exists(nlfile): 76 shutil.copy(model_nl, nlfile) 77 78 ############################################################################### 79 def case_setup(case, clean=False, test_mode=False, reset=False): 80 
############################################################################### 81 caseroot = case.get_value("CASEROOT") 82 os.chdir(caseroot) 83 msg = "case.setup starting" 84 append_status(msg, caseroot=caseroot, sfile="CaseStatus") 85 86 cimeroot = os.environ["CIMEROOT"] 87 88 # Check that $DIN_LOC_ROOT exists - and abort if not a namelist compare tests 89 din_loc_root = case.get_value("DIN_LOC_ROOT") 90 testcase = case.get_value("TESTCASE") 91 expect(not (not os.path.isdir(din_loc_root) and testcase != "SBN"), 92 "inputdata root is not a directory: \"$din_loc_root\" ") 93 94 # Check that userdefine settings are specified before expanding variable 95 for vid, value in case: 96 expect(not (type(value) is str and "USERDEFINED_required_build" in value), 97 "Parameter '%s' must be defined" % vid) 98 99 # Create batch script 100 if reset or clean: 101 # Clean batch script 102 103 backup_dir = "PESetupHist/b.%s" % time.strftime("%y%m%d-%H%M%S") 104 if not os.path.isdir(backup_dir): 105 os.makedirs(backup_dir) 106 107 # back up relevant files 108 for fileglob in ["case.run", "env_build.xml", "env_mach_pes.xml", "Macros*"]: 109 for filename in glob.glob(fileglob): 110 shutil.copy(filename, backup_dir) 111 if os.path.exists("case.run"): 112 os.remove("case.run") 113 114 # only do the following if are NOT in testmode 115 if not test_mode: 116 # rebuild the models (even on restart) 117 case.set_value("BUILD_COMPLETE", False) 118 119 # backup and then clean test script 120 if os.path.exists("case.test"): 121 shutil.copy("case.test", backup_dir) 122 os.remove("case.test") 123 logger.info("Successfully cleaned test script case.test") 124 125 if os.path.exists("case.testdriver"): 126 shutil.copy("case.testdriver", backup_dir) 127 os.remove("case.testdriver") 128 logger.info("Successfully cleaned test script case.testdriver") 129 130 logger.info("Successfully cleaned batch script case.run") 131 132 logger.info("Successfully cleaned batch script case.run") 133 logger.info("Some files have been saved to %s" % backup_dir) 134 135 msg = "case.setup clean complete" 136 append_status(msg, caseroot=caseroot, sfile="CaseStatus") 137 138 if not clean: 139 drv_comp = Component() 140 models = drv_comp.get_valid_model_components() 141 models.remove("DRV") 142 143 mach = case.get_value("MACH") 144 expect(mach is not None, "xml variable MACH is not set") 145 146 # Create Macros file only if it does not exist 147 if not os.path.exists("Macros"): 148 logger.debug("Creating Macros file for %s" % mach) 149 compilers = Compilers(compiler=case.get_value("COMPILER"), machine=mach, os_=case.get_value("OS"), mpilib=case.get_value("MPILIB")) 150 compilers.write_macros_file() 151 else: 152 logger.debug("Macros script already created ...skipping") 153 154 # Set tasks to 1 if mpi-serial library 155 if case.get_value("MPILIB") == "mpi-serial": 156 for vid, value in case: 157 if vid.startswith("NTASKS_") and value != 1: 158 case.set_value(vid, 1) 159 160 # Check ninst. 161 # In CIME there can be multiple instances of each component model (an ensemble) NINST is the instance of that component. 
162 for comp in models: 163 ninst = case.get_value("NINST_%s" % comp) 164 ntasks = case.get_value("NTASKS_%s" % comp) 165 if ninst > ntasks: 166 if ntasks == 1: 167 case.set_value("NTASKS_%s" % comp, ninst) 168 else: 169 expect(False, "%s NINST value %d greater than %s NTASKS %d" % (comp, ninst, comp, ntasks)) 170 171 expect(not (case.get_value("BUILD_THREADED") and case.get_value("COMPILER") == "nag"), 172 "it is not possible to run with OpenMP if using the NAG Fortran compiler") 173 174 if os.path.exists("case.run"): 175 logger.info("Machine/Decomp/Pes configuration has already been done ...skipping") 176 else: 177 _check_pelayouts_require_rebuild(case, models) 178 179 if os.path.exists("LockedFiles/env_build.xml"): 180 os.remove("LockedFiles/env_build.xml") 181 182 case.flush() 183 check_lockedfiles() 184 185 pestot = int(run_cmd("Tools/taskmaker.pl -sumonly")) 186 case.set_value("TOTALPES", pestot) 187 188 # Compute cost based on PE count 189 pval = 1 190 pcnt = 0 191 while pval < pestot: 192 pval *= 2 193 pcnt += 6 # (scaling like sqrt(6/10)) 194 pcost = 3 - pcnt / 10 # (3 is 64 with 6) 195 196 # Compute cost based on DEBUG 197 dcost = 3 if case.get_value("DEBUG") else 0 198 199 # Compute cost based on run length 200 # For simplicity, we use a heuristic just based on STOP_OPTION (not considering 201 # STOP_N), and only deal with options longer than ndays 202 lcost = 0 203 if "nmonth" in case.get_value("STOP_OPTION"): 204 # N months costs 30x as much as N days; since cost is based on log-base-2, add 5 205 lcost = 5 206 elif "nyear" in case.get_value("STOP_OPTION"): 207 # N years costs 365x as much as N days; since cost is based on log-base-2, add 9 208 lcost = 9 209 210 estcost = pcost + dcost + lcost 211 for cost in ["CCSM_CCOST", "CCSM_GCOST", "CCSM_TCOST", "CCSM_CCOST"]: 212 estcost += case.get_value(cost) 213 214 case.set_value("CCSM_PCOST", pcost) 215 case.set_value("CCSM_ESTCOST", estcost) 216 217 # create batch file 218 logger.info("Creating batch script case.run") 219 220 # Use BatchFactory to get the appropriate instance of a BatchMaker, 221 # use it to create our batch scripts 222 env_batch = case._get_env("batch") 223 for job in env_batch.get_jobs(): 224 input_batch_script = os.path.join(case.get_value("MACHDIR"), env_batch.get_value('template', subgroup=job)) 225 if job == "case.test" and testcase is not None and not test_mode: 226 logger.info("Writing %s script" % job) 227 testscript = os.path.join(cimeroot, "scripts", "Testing", "Testcases", "%s_script" % testcase) 228 # Short term fix to be removed when csh tests are removed 229 if not os.path.exists(testscript): 230 env_batch.make_batch_script(input_batch_script, job, case) 231 elif job != "case.test": 232 logger.info("Writing %s script" % job) 233 env_batch.make_batch_script(input_batch_script, job, case) 234 235 # Make a copy of env_mach_pes.xml in order to be able 236 # to check that it does not change once case.setup is invoked 237 logger.info("Locking file env_mach_pes.xml") 238 case.flush() 239 shutil.copy("env_mach_pes.xml", "LockedFiles") 240 241 # Create user_nl files for the required number of instances 242 if not os.path.exists("user_nl_cpl"): 243 logger.info("Creating user_nl_xxx files for components and cpl") 244 # loop over models 245 for model in models: 246 comp = case.get_value("COMP_%s" % model) 247 logger.info("Building %s usernl files"%model) 248 _build_usernl_files(case, model, comp) 249 if comp == "cism": 250 run_cmd("%s/../components/cism/cime_config/cism.template %s" % (cimeroot, caseroot)) 251 
252 _build_usernl_files(case, "drv", "cpl") 253 254 # Run preview namelists for scripts 255 logger.info("preview_namelists") 256 preview_namelists(case) 257 258 logger.info("See ./CaseDoc for component namelists") 259 logger.info("If an old case build already exists, might want to run \'case.build --clean-all\' before building") 260 261 # Create test script if appropriate 262 # Short term fix to be removed when csh tests are removed 263 if os.path.exists("env_test.xml"): 264 if not os.path.exists("case.test"): 265 logger.info("Starting testcase.setup") 266 run_cmd("./testcase.setup -caseroot %s" % caseroot) 267 logger.info("Finished testcase.setup") 268 269 msg = "case.setup complete" 270 append_status(msg, caseroot=caseroot, sfile="CaseStatus") 271 272 [end of utils/python/CIME/case_setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/utils/python/CIME/case_setup.py b/utils/python/CIME/case_setup.py --- a/utils/python/CIME/case_setup.py +++ b/utils/python/CIME/case_setup.py @@ -166,7 +166,7 @@ if ntasks == 1: case.set_value("NTASKS_%s" % comp, ninst) else: - expect(False, "%s NINST value %d greater than %s NTASKS %d" % (comp, ninst, comp, ntasks)) + expect(False, "NINST_%s value %d greater than NTASKS_%s %d" % (comp, ninst, comp, ntasks)) expect(not (case.get_value("BUILD_THREADED") and case.get_value("COMPILER") == "nag"), "it is not possible to run with OpenMP if using the NAG Fortran compiler")
{"golden_diff": "diff --git a/utils/python/CIME/case_setup.py b/utils/python/CIME/case_setup.py\n--- a/utils/python/CIME/case_setup.py\n+++ b/utils/python/CIME/case_setup.py\n@@ -166,7 +166,7 @@\n if ntasks == 1:\n case.set_value(\"NTASKS_%s\" % comp, ninst)\n else:\n- expect(False, \"%s NINST value %d greater than %s NTASKS %d\" % (comp, ninst, comp, ntasks))\n+ expect(False, \"NINST_%s value %d greater than NTASKS_%s %d\" % (comp, ninst, comp, ntasks))\n \n expect(not (case.get_value(\"BUILD_THREADED\") and case.get_value(\"COMPILER\") == \"nag\"),\n \"it is not possible to run with OpenMP if using the NAG Fortran compiler\")\n", "issue": "Misleading error message in case_setup.py \nI suggest changing \"%s NINST value %d greater than %s NTASKS %d\" \n(e.g ERROR: ATM NINST value 1 greater than ATM NTASKS 0)\n\nto \n\n\"NINST_%s value %d greater than NTASKS_%s %d\n(e.g. ERROR: NINST_ATM value 1 greater than NTASKS_ATM 0)\n\nto reflect the real variable name which can be queried or changed with xmlquery/xmlchange\n\n", "before_files": [{"content": "\"\"\"\nLibrary for case.setup.\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.check_lockedfiles import check_lockedfiles\nfrom CIME.preview_namelists import preview_namelists\nfrom CIME.XML.env_mach_pes import EnvMachPes\nfrom CIME.XML.component import Component\nfrom CIME.XML.compilers import Compilers\nfrom CIME.utils import expect, run_cmd, append_status\n\nimport shutil, time, glob\n\nlogger = logging.getLogger(__name__)\n\n###############################################################################\ndef _check_pelayouts_require_rebuild(case, models):\n###############################################################################\n \"\"\"\n Create if we require a rebuild, expects cwd is caseroot\n \"\"\"\n locked_pes = \"LockedFiles/env_mach_pes.xml\"\n if os.path.exists(locked_pes):\n # Look to see if $comp_PE_CHANGE_REQUIRES_REBUILD is defined\n # for any component\n env_mach_pes_locked = EnvMachPes(infile=locked_pes)\n for comp in models:\n if case.get_value(\"%s_PE_CHANGE_REQUIRES_REBUILD\" % comp):\n # Changing these values in env_mach_pes.xml will force\n # you to clean the corresponding component\n old_tasks = env_mach_pes_locked.get_value(\"NTASKS_%s\" % comp)\n old_threads = env_mach_pes_locked.get_value(\"NTHRDS_%s\" % comp)\n old_inst = env_mach_pes_locked.get_value(\"NINST_%s\" % comp)\n\n new_tasks = case.get_value(\"NTASKS_%s\" % comp)\n new_threads = case.get_value(\"NTHRDS_%s\" % comp)\n new_inst = case.get_value(\"NINST_%s\" % comp)\n\n if old_tasks != new_tasks or old_threads != new_threads or old_inst != new_inst:\n logger.warn(\"%s pe change requires clean build\" % comp)\n cleanflag = comp.lower()\n run_cmd(\"./case.build --clean %s\" % cleanflag)\n\n os.remove(locked_pes)\n\n###############################################################################\ndef _build_usernl_files(case, model, comp):\n###############################################################################\n \"\"\"\n Create user_nl_xxx files, expects cwd is caseroot\n \"\"\"\n model = model.upper()\n model_file = case.get_value(\"CONFIG_%s_FILE\" % model)\n model_dir = os.path.dirname(model_file)\n\n expect(os.path.isdir(model_dir),\n \"cannot find cime_config directory %s for component %s\" % (model_dir, comp))\n\n if comp == \"cpl\":\n if not os.path.exists(\"user_nl_cpl\"):\n shutil.copy(os.path.join(model_dir, \"user_nl_cpl\"), \".\")\n else:\n ninst = case.get_value(\"NINST_%s\" % model)\n nlfile = \"user_nl_%s\" % 
comp\n model_nl = os.path.join(model_dir, nlfile)\n if os.path.exists(model_nl):\n if ninst > 1:\n for inst_counter in xrange(1, ninst+1):\n case_nlfile = \"%s_%04d\" % (nlfile, inst_counter)\n if not os.path.exists(case_nlfile):\n shutil.copy(model_nl, case_nlfile)\n else:\n if not os.path.exists(nlfile):\n shutil.copy(model_nl, nlfile)\n\n###############################################################################\ndef case_setup(case, clean=False, test_mode=False, reset=False):\n###############################################################################\n caseroot = case.get_value(\"CASEROOT\")\n os.chdir(caseroot)\n msg = \"case.setup starting\"\n append_status(msg, caseroot=caseroot, sfile=\"CaseStatus\")\n\n cimeroot = os.environ[\"CIMEROOT\"]\n\n # Check that $DIN_LOC_ROOT exists - and abort if not a namelist compare tests\n din_loc_root = case.get_value(\"DIN_LOC_ROOT\")\n testcase = case.get_value(\"TESTCASE\")\n expect(not (not os.path.isdir(din_loc_root) and testcase != \"SBN\"),\n \"inputdata root is not a directory: \\\"$din_loc_root\\\" \")\n\n # Check that userdefine settings are specified before expanding variable\n for vid, value in case:\n expect(not (type(value) is str and \"USERDEFINED_required_build\" in value),\n \"Parameter '%s' must be defined\" % vid)\n\n # Create batch script\n if reset or clean:\n # Clean batch script\n\n backup_dir = \"PESetupHist/b.%s\" % time.strftime(\"%y%m%d-%H%M%S\")\n if not os.path.isdir(backup_dir):\n os.makedirs(backup_dir)\n\n # back up relevant files\n for fileglob in [\"case.run\", \"env_build.xml\", \"env_mach_pes.xml\", \"Macros*\"]:\n for filename in glob.glob(fileglob):\n shutil.copy(filename, backup_dir)\n if os.path.exists(\"case.run\"):\n os.remove(\"case.run\")\n\n # only do the following if are NOT in testmode\n if not test_mode:\n # rebuild the models (even on restart)\n case.set_value(\"BUILD_COMPLETE\", False)\n\n # backup and then clean test script\n if os.path.exists(\"case.test\"):\n shutil.copy(\"case.test\", backup_dir)\n os.remove(\"case.test\")\n logger.info(\"Successfully cleaned test script case.test\")\n\n if os.path.exists(\"case.testdriver\"):\n shutil.copy(\"case.testdriver\", backup_dir)\n os.remove(\"case.testdriver\")\n logger.info(\"Successfully cleaned test script case.testdriver\")\n\n logger.info(\"Successfully cleaned batch script case.run\")\n\n logger.info(\"Successfully cleaned batch script case.run\")\n logger.info(\"Some files have been saved to %s\" % backup_dir)\n\n msg = \"case.setup clean complete\"\n append_status(msg, caseroot=caseroot, sfile=\"CaseStatus\")\n\n if not clean:\n drv_comp = Component()\n models = drv_comp.get_valid_model_components()\n models.remove(\"DRV\")\n\n mach = case.get_value(\"MACH\")\n expect(mach is not None, \"xml variable MACH is not set\")\n\n # Create Macros file only if it does not exist\n if not os.path.exists(\"Macros\"):\n logger.debug(\"Creating Macros file for %s\" % mach)\n compilers = Compilers(compiler=case.get_value(\"COMPILER\"), machine=mach, os_=case.get_value(\"OS\"), mpilib=case.get_value(\"MPILIB\"))\n compilers.write_macros_file()\n else:\n logger.debug(\"Macros script already created ...skipping\")\n\n # Set tasks to 1 if mpi-serial library\n if case.get_value(\"MPILIB\") == \"mpi-serial\":\n for vid, value in case:\n if vid.startswith(\"NTASKS_\") and value != 1:\n case.set_value(vid, 1)\n\n # Check ninst.\n # In CIME there can be multiple instances of each component model (an ensemble) NINST is the instance of that component.\n for 
comp in models:\n ninst = case.get_value(\"NINST_%s\" % comp)\n ntasks = case.get_value(\"NTASKS_%s\" % comp)\n if ninst > ntasks:\n if ntasks == 1:\n case.set_value(\"NTASKS_%s\" % comp, ninst)\n else:\n expect(False, \"%s NINST value %d greater than %s NTASKS %d\" % (comp, ninst, comp, ntasks))\n\n expect(not (case.get_value(\"BUILD_THREADED\") and case.get_value(\"COMPILER\") == \"nag\"),\n \"it is not possible to run with OpenMP if using the NAG Fortran compiler\")\n\n if os.path.exists(\"case.run\"):\n logger.info(\"Machine/Decomp/Pes configuration has already been done ...skipping\")\n else:\n _check_pelayouts_require_rebuild(case, models)\n\n if os.path.exists(\"LockedFiles/env_build.xml\"):\n os.remove(\"LockedFiles/env_build.xml\")\n\n case.flush()\n check_lockedfiles()\n\n pestot = int(run_cmd(\"Tools/taskmaker.pl -sumonly\"))\n case.set_value(\"TOTALPES\", pestot)\n\n # Compute cost based on PE count\n pval = 1\n pcnt = 0\n while pval < pestot:\n pval *= 2\n pcnt += 6 # (scaling like sqrt(6/10))\n pcost = 3 - pcnt / 10 # (3 is 64 with 6)\n\n # Compute cost based on DEBUG\n dcost = 3 if case.get_value(\"DEBUG\") else 0\n\n # Compute cost based on run length\n # For simplicity, we use a heuristic just based on STOP_OPTION (not considering\n # STOP_N), and only deal with options longer than ndays\n lcost = 0\n if \"nmonth\" in case.get_value(\"STOP_OPTION\"):\n # N months costs 30x as much as N days; since cost is based on log-base-2, add 5\n lcost = 5\n elif \"nyear\" in case.get_value(\"STOP_OPTION\"):\n # N years costs 365x as much as N days; since cost is based on log-base-2, add 9\n lcost = 9\n\n estcost = pcost + dcost + lcost\n for cost in [\"CCSM_CCOST\", \"CCSM_GCOST\", \"CCSM_TCOST\", \"CCSM_CCOST\"]:\n estcost += case.get_value(cost)\n\n case.set_value(\"CCSM_PCOST\", pcost)\n case.set_value(\"CCSM_ESTCOST\", estcost)\n\n # create batch file\n logger.info(\"Creating batch script case.run\")\n\n # Use BatchFactory to get the appropriate instance of a BatchMaker,\n # use it to create our batch scripts\n env_batch = case._get_env(\"batch\")\n for job in env_batch.get_jobs():\n input_batch_script = os.path.join(case.get_value(\"MACHDIR\"), env_batch.get_value('template', subgroup=job))\n if job == \"case.test\" and testcase is not None and not test_mode:\n logger.info(\"Writing %s script\" % job)\n testscript = os.path.join(cimeroot, \"scripts\", \"Testing\", \"Testcases\", \"%s_script\" % testcase)\n # Short term fix to be removed when csh tests are removed\n if not os.path.exists(testscript):\n env_batch.make_batch_script(input_batch_script, job, case)\n elif job != \"case.test\":\n logger.info(\"Writing %s script\" % job)\n env_batch.make_batch_script(input_batch_script, job, case)\n\n # Make a copy of env_mach_pes.xml in order to be able\n # to check that it does not change once case.setup is invoked\n logger.info(\"Locking file env_mach_pes.xml\")\n case.flush()\n shutil.copy(\"env_mach_pes.xml\", \"LockedFiles\")\n\n # Create user_nl files for the required number of instances\n if not os.path.exists(\"user_nl_cpl\"):\n logger.info(\"Creating user_nl_xxx files for components and cpl\")\n # loop over models\n for model in models:\n comp = case.get_value(\"COMP_%s\" % model)\n logger.info(\"Building %s usernl files\"%model)\n _build_usernl_files(case, model, comp)\n if comp == \"cism\":\n run_cmd(\"%s/../components/cism/cime_config/cism.template %s\" % (cimeroot, caseroot))\n\n _build_usernl_files(case, \"drv\", \"cpl\")\n\n # Run preview namelists for scripts\n 
logger.info(\"preview_namelists\")\n preview_namelists(case)\n\n logger.info(\"See ./CaseDoc for component namelists\")\n logger.info(\"If an old case build already exists, might want to run \\'case.build --clean-all\\' before building\")\n\n # Create test script if appropriate\n # Short term fix to be removed when csh tests are removed\n if os.path.exists(\"env_test.xml\"):\n if not os.path.exists(\"case.test\"):\n logger.info(\"Starting testcase.setup\")\n run_cmd(\"./testcase.setup -caseroot %s\" % caseroot)\n logger.info(\"Finished testcase.setup\")\n\n msg = \"case.setup complete\"\n append_status(msg, caseroot=caseroot, sfile=\"CaseStatus\")\n\n", "path": "utils/python/CIME/case_setup.py"}]}
num_tokens_prompt: 4,064
num_tokens_diff: 202
problem_id: gh_patches_debug_40365
source: rasdani/github-patches
task_type: git_diff
in_source_id: freqtrade__freqtrade-6560
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> inheritance strategy can not load parameter file shared from base class <!-- ## Describe your environment * Operating system: ubuntu * Python Version: 3.8.10` * CCXT version: _1.74.63 * Freqtrade Version: fraqtrade develop ## Describe the problem: if StragegyA derived from StragegyB written in same .py file, then both of them share the same json file generated by Hyperopt. They would could conflict with each other when using Hyperopt to optimize one of them after the other Strategy was optimized. ### Steps to reproduce: 1. StragegyB derived from StragegyA and they are in one .py file 2. optimize StragegyA using Hyperopt ( get file A.json) 3. optimize StragegyB using Hyperopt 4. switching step3 and step2 is the same result ### Observed Results: load file A.json as StragegyB parameter file, then ERROR ### Relevant code exceptions or logs 2022-03-20 00:59:44,224 - freqtrade.optimize.hyperopt_tools - INFO - Dumping parameters to /home/bourne/freqtrade-develop/user_data/strategies/AStrategy.json ... 2022-03-20 01:01:06,082 - freqtrade.resolvers.iresolver - INFO - Using resolved strategy BStrategy from '/home/bourne/freqtrade-develop/user_data/strategies/AStrategy.py'... 2022-03-20 01:01:06,082 - freqtrade.strategy.hyper - INFO - Loading parameters from file /home/bourne/freqtrade-develop/user_data/strategies/AStrategy.json 2022-03-20 01:01:06,082 - freqtrade - ERROR - Invalid parameter file provided. </issue> <code> [start of freqtrade/resolvers/iresolver.py] 1 # pragma pylint: disable=attribute-defined-outside-init 2 3 """ 4 This module load custom objects 5 """ 6 import importlib.util 7 import inspect 8 import logging 9 from pathlib import Path 10 from typing import Any, Dict, Iterator, List, Optional, Tuple, Type, Union 11 12 from freqtrade.exceptions import OperationalException 13 14 15 logger = logging.getLogger(__name__) 16 17 18 class IResolver: 19 """ 20 This class contains all the logic to load custom classes 21 """ 22 # Childclasses need to override this 23 object_type: Type[Any] 24 object_type_str: str 25 user_subdir: Optional[str] = None 26 initial_search_path: Optional[Path] 27 28 @classmethod 29 def build_search_paths(cls, config: Dict[str, Any], user_subdir: Optional[str] = None, 30 extra_dir: Optional[str] = None) -> List[Path]: 31 32 abs_paths: List[Path] = [] 33 if cls.initial_search_path: 34 abs_paths.append(cls.initial_search_path) 35 36 if user_subdir: 37 abs_paths.insert(0, config['user_data_dir'].joinpath(user_subdir)) 38 39 if extra_dir: 40 # Add extra directory to the top of the search paths 41 abs_paths.insert(0, Path(extra_dir).resolve()) 42 43 return abs_paths 44 45 @classmethod 46 def _get_valid_object(cls, module_path: Path, object_name: Optional[str], 47 enum_failed: bool = False) -> Iterator[Any]: 48 """ 49 Generator returning objects with matching object_type and object_name in the path given. 50 :param module_path: absolute path to the module 51 :param object_name: Class name of the object 52 :param enum_failed: If True, will return None for modules which fail. 53 Otherwise, failing modules are skipped. 54 :return: generator containing tuple of matching objects 55 Tuple format: [Object, source] 56 """ 57 58 # Generate spec based on absolute path 59 # Pass object_name as first argument to have logging print a reasonable name. 
60 spec = importlib.util.spec_from_file_location(object_name or "", str(module_path)) 61 if not spec: 62 return iter([None]) 63 64 module = importlib.util.module_from_spec(spec) 65 try: 66 spec.loader.exec_module(module) # type: ignore # importlib does not use typehints 67 except (ModuleNotFoundError, SyntaxError, ImportError, NameError) as err: 68 # Catch errors in case a specific module is not installed 69 logger.warning(f"Could not import {module_path} due to '{err}'") 70 if enum_failed: 71 return iter([None]) 72 73 valid_objects_gen = ( 74 (obj, inspect.getsource(module)) for 75 name, obj in inspect.getmembers( 76 module, inspect.isclass) if ((object_name is None or object_name == name) 77 and issubclass(obj, cls.object_type) 78 and obj is not cls.object_type) 79 ) 80 return valid_objects_gen 81 82 @classmethod 83 def _search_object(cls, directory: Path, *, object_name: str, add_source: bool = False 84 ) -> Union[Tuple[Any, Path], Tuple[None, None]]: 85 """ 86 Search for the objectname in the given directory 87 :param directory: relative or absolute directory path 88 :param object_name: ClassName of the object to load 89 :return: object class 90 """ 91 logger.debug(f"Searching for {cls.object_type.__name__} {object_name} in '{directory}'") 92 for entry in directory.iterdir(): 93 # Only consider python files 94 if entry.suffix != '.py': 95 logger.debug('Ignoring %s', entry) 96 continue 97 if entry.is_symlink() and not entry.is_file(): 98 logger.debug('Ignoring broken symlink %s', entry) 99 continue 100 module_path = entry.resolve() 101 102 obj = next(cls._get_valid_object(module_path, object_name), None) 103 104 if obj: 105 obj[0].__file__ = str(entry) 106 if add_source: 107 obj[0].__source__ = obj[1] 108 return (obj[0], module_path) 109 return (None, None) 110 111 @classmethod 112 def _load_object(cls, paths: List[Path], *, object_name: str, add_source: bool = False, 113 kwargs: dict = {}) -> Optional[Any]: 114 """ 115 Try to load object from path list. 116 """ 117 118 for _path in paths: 119 try: 120 (module, module_path) = cls._search_object(directory=_path, 121 object_name=object_name, 122 add_source=add_source) 123 if module: 124 logger.info( 125 f"Using resolved {cls.object_type.__name__.lower()[1:]} {object_name} " 126 f"from '{module_path}'...") 127 return module(**kwargs) 128 except FileNotFoundError: 129 logger.warning('Path "%s" does not exist.', _path.resolve()) 130 131 return None 132 133 @classmethod 134 def load_object(cls, object_name: str, config: dict, *, kwargs: dict, 135 extra_dir: Optional[str] = None) -> Any: 136 """ 137 Search and loads the specified object as configured in hte child class. 138 :param object_name: name of the module to import 139 :param config: configuration dictionary 140 :param extra_dir: additional directory to search for the given pairlist 141 :raises: OperationalException if the class is invalid or does not exist. 142 :return: Object instance or None 143 """ 144 145 abs_paths = cls.build_search_paths(config, 146 user_subdir=cls.user_subdir, 147 extra_dir=extra_dir) 148 149 found_object = cls._load_object(paths=abs_paths, object_name=object_name, 150 kwargs=kwargs) 151 if found_object: 152 return found_object 153 raise OperationalException( 154 f"Impossible to load {cls.object_type_str} '{object_name}'. This class does not exist " 155 "or contains Python code errors." 
156 ) 157 158 @classmethod 159 def search_all_objects(cls, directory: Path, 160 enum_failed: bool) -> List[Dict[str, Any]]: 161 """ 162 Searches a directory for valid objects 163 :param directory: Path to search 164 :param enum_failed: If True, will return None for modules which fail. 165 Otherwise, failing modules are skipped. 166 :return: List of dicts containing 'name', 'class' and 'location' entries 167 """ 168 logger.debug(f"Searching for {cls.object_type.__name__} '{directory}'") 169 objects = [] 170 for entry in directory.iterdir(): 171 # Only consider python files 172 if entry.suffix != '.py': 173 logger.debug('Ignoring %s', entry) 174 continue 175 module_path = entry.resolve() 176 logger.debug(f"Path {module_path}") 177 for obj in cls._get_valid_object(module_path, object_name=None, 178 enum_failed=enum_failed): 179 objects.append( 180 {'name': obj[0].__name__ if obj is not None else '', 181 'class': obj[0] if obj is not None else None, 182 'location': entry, 183 }) 184 return objects 185 [end of freqtrade/resolvers/iresolver.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/freqtrade/resolvers/iresolver.py b/freqtrade/resolvers/iresolver.py --- a/freqtrade/resolvers/iresolver.py +++ b/freqtrade/resolvers/iresolver.py @@ -6,6 +6,7 @@ import importlib.util import inspect import logging +import sys from pathlib import Path from typing import Any, Dict, Iterator, List, Optional, Tuple, Type, Union @@ -15,6 +16,22 @@ logger = logging.getLogger(__name__) +class PathModifier: + def __init__(self, path: Path): + self.path = path + + def __enter__(self): + """Inject path to allow importing with relative imports.""" + sys.path.insert(0, str(self.path)) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """Undo insertion of local path.""" + str_path = str(self.path) + if str_path in sys.path: + sys.path.remove(str_path) + + class IResolver: """ This class contains all the logic to load custom classes @@ -57,27 +74,32 @@ # Generate spec based on absolute path # Pass object_name as first argument to have logging print a reasonable name. - spec = importlib.util.spec_from_file_location(object_name or "", str(module_path)) - if not spec: - return iter([None]) - - module = importlib.util.module_from_spec(spec) - try: - spec.loader.exec_module(module) # type: ignore # importlib does not use typehints - except (ModuleNotFoundError, SyntaxError, ImportError, NameError) as err: - # Catch errors in case a specific module is not installed - logger.warning(f"Could not import {module_path} due to '{err}'") - if enum_failed: + with PathModifier(module_path.parent): + module_name = module_path.stem or "" + spec = importlib.util.spec_from_file_location(module_name, str(module_path)) + if not spec: return iter([None]) - valid_objects_gen = ( - (obj, inspect.getsource(module)) for - name, obj in inspect.getmembers( - module, inspect.isclass) if ((object_name is None or object_name == name) - and issubclass(obj, cls.object_type) - and obj is not cls.object_type) - ) - return valid_objects_gen + module = importlib.util.module_from_spec(spec) + try: + spec.loader.exec_module(module) # type: ignore # importlib does not use typehints + except (ModuleNotFoundError, SyntaxError, ImportError, NameError) as err: + # Catch errors in case a specific module is not installed + logger.warning(f"Could not import {module_path} due to '{err}'") + if enum_failed: + return iter([None]) + + valid_objects_gen = ( + (obj, inspect.getsource(module)) for + name, obj in inspect.getmembers( + module, inspect.isclass) if ((object_name is None or object_name == name) + and issubclass(obj, cls.object_type) + and obj is not cls.object_type + and obj.__module__ == module_name + ) + ) + # The __module__ check ensures we only use strategies that are defined in this folder. + return valid_objects_gen @classmethod def _search_object(cls, directory: Path, *, object_name: str, add_source: bool = False
{"golden_diff": "diff --git a/freqtrade/resolvers/iresolver.py b/freqtrade/resolvers/iresolver.py\n--- a/freqtrade/resolvers/iresolver.py\n+++ b/freqtrade/resolvers/iresolver.py\n@@ -6,6 +6,7 @@\n import importlib.util\n import inspect\n import logging\n+import sys\n from pathlib import Path\n from typing import Any, Dict, Iterator, List, Optional, Tuple, Type, Union\n \n@@ -15,6 +16,22 @@\n logger = logging.getLogger(__name__)\n \n \n+class PathModifier:\n+ def __init__(self, path: Path):\n+ self.path = path\n+\n+ def __enter__(self):\n+ \"\"\"Inject path to allow importing with relative imports.\"\"\"\n+ sys.path.insert(0, str(self.path))\n+ return self\n+\n+ def __exit__(self, exc_type, exc_val, exc_tb):\n+ \"\"\"Undo insertion of local path.\"\"\"\n+ str_path = str(self.path)\n+ if str_path in sys.path:\n+ sys.path.remove(str_path)\n+\n+\n class IResolver:\n \"\"\"\n This class contains all the logic to load custom classes\n@@ -57,27 +74,32 @@\n \n # Generate spec based on absolute path\n # Pass object_name as first argument to have logging print a reasonable name.\n- spec = importlib.util.spec_from_file_location(object_name or \"\", str(module_path))\n- if not spec:\n- return iter([None])\n-\n- module = importlib.util.module_from_spec(spec)\n- try:\n- spec.loader.exec_module(module) # type: ignore # importlib does not use typehints\n- except (ModuleNotFoundError, SyntaxError, ImportError, NameError) as err:\n- # Catch errors in case a specific module is not installed\n- logger.warning(f\"Could not import {module_path} due to '{err}'\")\n- if enum_failed:\n+ with PathModifier(module_path.parent):\n+ module_name = module_path.stem or \"\"\n+ spec = importlib.util.spec_from_file_location(module_name, str(module_path))\n+ if not spec:\n return iter([None])\n \n- valid_objects_gen = (\n- (obj, inspect.getsource(module)) for\n- name, obj in inspect.getmembers(\n- module, inspect.isclass) if ((object_name is None or object_name == name)\n- and issubclass(obj, cls.object_type)\n- and obj is not cls.object_type)\n- )\n- return valid_objects_gen\n+ module = importlib.util.module_from_spec(spec)\n+ try:\n+ spec.loader.exec_module(module) # type: ignore # importlib does not use typehints\n+ except (ModuleNotFoundError, SyntaxError, ImportError, NameError) as err:\n+ # Catch errors in case a specific module is not installed\n+ logger.warning(f\"Could not import {module_path} due to '{err}'\")\n+ if enum_failed:\n+ return iter([None])\n+\n+ valid_objects_gen = (\n+ (obj, inspect.getsource(module)) for\n+ name, obj in inspect.getmembers(\n+ module, inspect.isclass) if ((object_name is None or object_name == name)\n+ and issubclass(obj, cls.object_type)\n+ and obj is not cls.object_type\n+ and obj.__module__ == module_name\n+ )\n+ )\n+ # The __module__ check ensures we only use strategies that are defined in this folder.\n+ return valid_objects_gen\n \n @classmethod\n def _search_object(cls, directory: Path, *, object_name: str, add_source: bool = False\n", "issue": "inheritance strategy can not load parameter file shared from base class\n<!-- \r\n\r\n\r\n## Describe your environment\r\n\r\n * Operating system: ubuntu\r\n * Python Version: 3.8.10`\r\n * CCXT version: _1.74.63\r\n * Freqtrade Version: fraqtrade develop\r\n \r\n\r\n## Describe the problem:\r\n\r\nif StragegyA derived from StragegyB written in same .py file, \r\nthen both of them share the same json file generated by Hyperopt.\r\n\r\nThey would could conflict with each other when using Hyperopt to optimize one of them after the other 
Strategy was optimized.\r\n\r\n\r\n### Steps to reproduce:\r\n\r\n 1. StragegyB derived from StragegyA and they are in one .py file\r\n 2. optimize StragegyA using Hyperopt ( get file A.json)\r\n 3. optimize StragegyB using Hyperopt \r\n 4. switching step3 and step2 is the same result\r\n \r\n### Observed Results:\r\nload file A.json as StragegyB parameter file, then ERROR\r\n\r\n### Relevant code exceptions or logs\r\n\r\n2022-03-20 00:59:44,224 - freqtrade.optimize.hyperopt_tools - INFO - Dumping parameters to /home/bourne/freqtrade-develop/user_data/strategies/AStrategy.json\r\n\r\n...\r\n\r\n2022-03-20 01:01:06,082 - freqtrade.resolvers.iresolver - INFO - Using resolved strategy BStrategy from '/home/bourne/freqtrade-develop/user_data/strategies/AStrategy.py'...\r\n2022-03-20 01:01:06,082 - freqtrade.strategy.hyper - INFO - Loading parameters from file /home/bourne/freqtrade-develop/user_data/strategies/AStrategy.json\r\n2022-03-20 01:01:06,082 - freqtrade - ERROR - Invalid parameter file provided.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# pragma pylint: disable=attribute-defined-outside-init\n\n\"\"\"\nThis module load custom objects\n\"\"\"\nimport importlib.util\nimport inspect\nimport logging\nfrom pathlib import Path\nfrom typing import Any, Dict, Iterator, List, Optional, Tuple, Type, Union\n\nfrom freqtrade.exceptions import OperationalException\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass IResolver:\n \"\"\"\n This class contains all the logic to load custom classes\n \"\"\"\n # Childclasses need to override this\n object_type: Type[Any]\n object_type_str: str\n user_subdir: Optional[str] = None\n initial_search_path: Optional[Path]\n\n @classmethod\n def build_search_paths(cls, config: Dict[str, Any], user_subdir: Optional[str] = None,\n extra_dir: Optional[str] = None) -> List[Path]:\n\n abs_paths: List[Path] = []\n if cls.initial_search_path:\n abs_paths.append(cls.initial_search_path)\n\n if user_subdir:\n abs_paths.insert(0, config['user_data_dir'].joinpath(user_subdir))\n\n if extra_dir:\n # Add extra directory to the top of the search paths\n abs_paths.insert(0, Path(extra_dir).resolve())\n\n return abs_paths\n\n @classmethod\n def _get_valid_object(cls, module_path: Path, object_name: Optional[str],\n enum_failed: bool = False) -> Iterator[Any]:\n \"\"\"\n Generator returning objects with matching object_type and object_name in the path given.\n :param module_path: absolute path to the module\n :param object_name: Class name of the object\n :param enum_failed: If True, will return None for modules which fail.\n Otherwise, failing modules are skipped.\n :return: generator containing tuple of matching objects\n Tuple format: [Object, source]\n \"\"\"\n\n # Generate spec based on absolute path\n # Pass object_name as first argument to have logging print a reasonable name.\n spec = importlib.util.spec_from_file_location(object_name or \"\", str(module_path))\n if not spec:\n return iter([None])\n\n module = importlib.util.module_from_spec(spec)\n try:\n spec.loader.exec_module(module) # type: ignore # importlib does not use typehints\n except (ModuleNotFoundError, SyntaxError, ImportError, NameError) as err:\n # Catch errors in case a specific module is not installed\n logger.warning(f\"Could not import {module_path} due to '{err}'\")\n if enum_failed:\n return iter([None])\n\n valid_objects_gen = (\n (obj, inspect.getsource(module)) for\n name, obj in inspect.getmembers(\n module, inspect.isclass) if ((object_name is None or object_name == name)\n and 
issubclass(obj, cls.object_type)\n and obj is not cls.object_type)\n )\n return valid_objects_gen\n\n @classmethod\n def _search_object(cls, directory: Path, *, object_name: str, add_source: bool = False\n ) -> Union[Tuple[Any, Path], Tuple[None, None]]:\n \"\"\"\n Search for the objectname in the given directory\n :param directory: relative or absolute directory path\n :param object_name: ClassName of the object to load\n :return: object class\n \"\"\"\n logger.debug(f\"Searching for {cls.object_type.__name__} {object_name} in '{directory}'\")\n for entry in directory.iterdir():\n # Only consider python files\n if entry.suffix != '.py':\n logger.debug('Ignoring %s', entry)\n continue\n if entry.is_symlink() and not entry.is_file():\n logger.debug('Ignoring broken symlink %s', entry)\n continue\n module_path = entry.resolve()\n\n obj = next(cls._get_valid_object(module_path, object_name), None)\n\n if obj:\n obj[0].__file__ = str(entry)\n if add_source:\n obj[0].__source__ = obj[1]\n return (obj[0], module_path)\n return (None, None)\n\n @classmethod\n def _load_object(cls, paths: List[Path], *, object_name: str, add_source: bool = False,\n kwargs: dict = {}) -> Optional[Any]:\n \"\"\"\n Try to load object from path list.\n \"\"\"\n\n for _path in paths:\n try:\n (module, module_path) = cls._search_object(directory=_path,\n object_name=object_name,\n add_source=add_source)\n if module:\n logger.info(\n f\"Using resolved {cls.object_type.__name__.lower()[1:]} {object_name} \"\n f\"from '{module_path}'...\")\n return module(**kwargs)\n except FileNotFoundError:\n logger.warning('Path \"%s\" does not exist.', _path.resolve())\n\n return None\n\n @classmethod\n def load_object(cls, object_name: str, config: dict, *, kwargs: dict,\n extra_dir: Optional[str] = None) -> Any:\n \"\"\"\n Search and loads the specified object as configured in hte child class.\n :param object_name: name of the module to import\n :param config: configuration dictionary\n :param extra_dir: additional directory to search for the given pairlist\n :raises: OperationalException if the class is invalid or does not exist.\n :return: Object instance or None\n \"\"\"\n\n abs_paths = cls.build_search_paths(config,\n user_subdir=cls.user_subdir,\n extra_dir=extra_dir)\n\n found_object = cls._load_object(paths=abs_paths, object_name=object_name,\n kwargs=kwargs)\n if found_object:\n return found_object\n raise OperationalException(\n f\"Impossible to load {cls.object_type_str} '{object_name}'. 
This class does not exist \"\n \"or contains Python code errors.\"\n )\n\n @classmethod\n def search_all_objects(cls, directory: Path,\n enum_failed: bool) -> List[Dict[str, Any]]:\n \"\"\"\n Searches a directory for valid objects\n :param directory: Path to search\n :param enum_failed: If True, will return None for modules which fail.\n Otherwise, failing modules are skipped.\n :return: List of dicts containing 'name', 'class' and 'location' entries\n \"\"\"\n logger.debug(f\"Searching for {cls.object_type.__name__} '{directory}'\")\n objects = []\n for entry in directory.iterdir():\n # Only consider python files\n if entry.suffix != '.py':\n logger.debug('Ignoring %s', entry)\n continue\n module_path = entry.resolve()\n logger.debug(f\"Path {module_path}\")\n for obj in cls._get_valid_object(module_path, object_name=None,\n enum_failed=enum_failed):\n objects.append(\n {'name': obj[0].__name__ if obj is not None else '',\n 'class': obj[0] if obj is not None else None,\n 'location': entry,\n })\n return objects\n", "path": "freqtrade/resolvers/iresolver.py"}]}
num_tokens_prompt: 2,946
num_tokens_diff: 796
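The golden diff in this record fixes the base/derived strategy clash by (1) importing each strategy file under its own module stem while that file's directory is temporarily on `sys.path`, and (2) keeping only classes whose `__module__` equals that stem, so a parent class defined in another file is not resolved by accident. A stripped-down sketch of the same pattern — standalone names, not freqtrade's actual resolver API — is:

```python
import importlib.util
import inspect
import sys
from pathlib import Path


class PathModifier:
    """Temporarily put a directory at the front of sys.path."""

    def __init__(self, path: Path):
        self.path = path

    def __enter__(self):
        sys.path.insert(0, str(self.path))
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if str(self.path) in sys.path:
            sys.path.remove(str(self.path))


def classes_defined_in(module_path: Path, base: type) -> list:
    """Load `module_path` and return subclasses of `base` defined in that file only."""
    with PathModifier(module_path.parent):
        name = module_path.stem
        spec = importlib.util.spec_from_file_location(name, str(module_path))
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return [obj for _, obj in inspect.getmembers(module, inspect.isclass)
                if issubclass(obj, base) and obj is not base
                and obj.__module__ == name]  # drop classes imported from elsewhere
```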
problem_id: gh_patches_debug_9537
source: rasdani/github-patches
task_type: git_diff
in_source_id: Lightning-AI__torchmetrics-1452
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> SMAPE formula typo ## 📚 Documentation There's a typo in the [SMAPE formula](https://torchmetrics.readthedocs.io/en/stable/regression/symmetric_mean_absolute_percentage_error.html). It should be `{SMAPE} = \frac{2}{n}\sum_1^n\frac{| y_i - \hat{y_i} |}{\max(| y_i | + | \hat{y_i} |, \epsilon)}` instead of `{SMAPE} = \frac{2}{n}\sum_1^n max(\frac{| y_i - \hat{y_i} |}{| y_i | + | \hat{y_i} |, \epsilon})`. The attached screenshot shows the typo and its correction. ![smape](https://user-images.githubusercontent.com/44662992/213825207-21c308b1-a407-4830-813f-a3dbe87ddb41.png) </issue> <code> [start of src/torchmetrics/regression/symmetric_mape.py] 1 # Copyright The PyTorch Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 from typing import Any 15 16 from torch import Tensor, tensor 17 18 from torchmetrics.functional.regression.symmetric_mape import ( 19 _symmetric_mean_absolute_percentage_error_compute, 20 _symmetric_mean_absolute_percentage_error_update, 21 ) 22 from torchmetrics.metric import Metric 23 24 25 class SymmetricMeanAbsolutePercentageError(Metric): 26 r"""Computes symmetric mean absolute percentage error (`SMAPE`_). 27 28 .. math:: \text{SMAPE} = \frac{2}{n}\sum_1^n max(\frac{| y_i - \hat{y_i} |}{| y_i | + | \hat{y_i} |, \epsilon}) 29 30 Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions. 31 32 As input to ``forward`` and ``update`` the metric accepts the following input: 33 34 - ``preds`` (:class:`~torch.Tensor`): Predictions from model 35 - ``target`` (:class:`~torch.Tensor`): Ground truth values 36 37 As output of ``forward`` and ``compute`` the metric returns the following output: 38 39 - ``smape`` (:class:`~torch.Tensor`): A tensor with non-negative floating point smape value between 0 and 1 40 41 Args: 42 kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. 
43 44 Example: 45 >>> from torchmetrics import SymmetricMeanAbsolutePercentageError 46 >>> target = tensor([1, 10, 1e6]) 47 >>> preds = tensor([0.9, 15, 1.2e6]) 48 >>> smape = SymmetricMeanAbsolutePercentageError() 49 >>> smape(preds, target) 50 tensor(0.2290) 51 """ 52 is_differentiable: bool = True 53 higher_is_better: bool = False 54 full_state_update: bool = False 55 sum_abs_per_error: Tensor 56 total: Tensor 57 58 def __init__( 59 self, 60 **kwargs: Any, 61 ) -> None: 62 super().__init__(**kwargs) 63 64 self.add_state("sum_abs_per_error", default=tensor(0.0), dist_reduce_fx="sum") 65 self.add_state("total", default=tensor(0.0), dist_reduce_fx="sum") 66 67 def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore 68 """Update state with predictions and targets.""" 69 sum_abs_per_error, num_obs = _symmetric_mean_absolute_percentage_error_update(preds, target) 70 71 self.sum_abs_per_error += sum_abs_per_error 72 self.total += num_obs 73 74 def compute(self) -> Tensor: 75 """Computes mean absolute percentage error over state.""" 76 return _symmetric_mean_absolute_percentage_error_compute(self.sum_abs_per_error, self.total) 77 [end of src/torchmetrics/regression/symmetric_mape.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/torchmetrics/regression/symmetric_mape.py b/src/torchmetrics/regression/symmetric_mape.py --- a/src/torchmetrics/regression/symmetric_mape.py +++ b/src/torchmetrics/regression/symmetric_mape.py @@ -25,7 +25,7 @@ class SymmetricMeanAbsolutePercentageError(Metric): r"""Computes symmetric mean absolute percentage error (`SMAPE`_). - .. math:: \text{SMAPE} = \frac{2}{n}\sum_1^n max(\frac{| y_i - \hat{y_i} |}{| y_i | + | \hat{y_i} |, \epsilon}) + .. math:: \text{SMAPE} = \frac{2}{n}\sum_1^n\frac{| y_i - \hat{y_i} |}{\max(| y_i | + | \hat{y_i} |, \epsilon)} Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
{"golden_diff": "diff --git a/src/torchmetrics/regression/symmetric_mape.py b/src/torchmetrics/regression/symmetric_mape.py\n--- a/src/torchmetrics/regression/symmetric_mape.py\n+++ b/src/torchmetrics/regression/symmetric_mape.py\n@@ -25,7 +25,7 @@\n class SymmetricMeanAbsolutePercentageError(Metric):\n r\"\"\"Computes symmetric mean absolute percentage error (`SMAPE`_).\n \n- .. math:: \\text{SMAPE} = \\frac{2}{n}\\sum_1^n max(\\frac{| y_i - \\hat{y_i} |}{| y_i | + | \\hat{y_i} |, \\epsilon})\n+ .. math:: \\text{SMAPE} = \\frac{2}{n}\\sum_1^n\\frac{| y_i - \\hat{y_i} |}{\\max(| y_i | + | \\hat{y_i} |, \\epsilon)}\n \n Where :math:`y` is a tensor of target values, and :math:`\\hat{y}` is a tensor of predictions.\n", "issue": "SMAPE formula typo\n## \ud83d\udcda Documentation\r\n\r\n\r\nThere's a typo in the [SMAPE formula](https://torchmetrics.readthedocs.io/en/stable/regression/symmetric_mean_absolute_percentage_error.html). It should be `{SMAPE} = \\frac{2}{n}\\sum_1^n\\frac{| y_i - \\hat{y_i} |}{\\max(| y_i | + | \\hat{y_i} |, \\epsilon)}` instead of `{SMAPE} = \\frac{2}{n}\\sum_1^n max(\\frac{| y_i - \\hat{y_i} |}{| y_i | + | \\hat{y_i} |, \\epsilon})`. The attached screenshot shows the typo and its correction.\r\n![smape](https://user-images.githubusercontent.com/44662992/213825207-21c308b1-a407-4830-813f-a3dbe87ddb41.png)\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any\n\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.functional.regression.symmetric_mape import (\n _symmetric_mean_absolute_percentage_error_compute,\n _symmetric_mean_absolute_percentage_error_update,\n)\nfrom torchmetrics.metric import Metric\n\n\nclass SymmetricMeanAbsolutePercentageError(Metric):\n r\"\"\"Computes symmetric mean absolute percentage error (`SMAPE`_).\n\n .. 
math:: \\text{SMAPE} = \\frac{2}{n}\\sum_1^n max(\\frac{| y_i - \\hat{y_i} |}{| y_i | + | \\hat{y_i} |, \\epsilon})\n\n Where :math:`y` is a tensor of target values, and :math:`\\hat{y}` is a tensor of predictions.\n\n As input to ``forward`` and ``update`` the metric accepts the following input:\n\n - ``preds`` (:class:`~torch.Tensor`): Predictions from model\n - ``target`` (:class:`~torch.Tensor`): Ground truth values\n\n As output of ``forward`` and ``compute`` the metric returns the following output:\n\n - ``smape`` (:class:`~torch.Tensor`): A tensor with non-negative floating point smape value between 0 and 1\n\n Args:\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Example:\n >>> from torchmetrics import SymmetricMeanAbsolutePercentageError\n >>> target = tensor([1, 10, 1e6])\n >>> preds = tensor([0.9, 15, 1.2e6])\n >>> smape = SymmetricMeanAbsolutePercentageError()\n >>> smape(preds, target)\n tensor(0.2290)\n \"\"\"\n is_differentiable: bool = True\n higher_is_better: bool = False\n full_state_update: bool = False\n sum_abs_per_error: Tensor\n total: Tensor\n\n def __init__(\n self,\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n\n self.add_state(\"sum_abs_per_error\", default=tensor(0.0), dist_reduce_fx=\"sum\")\n self.add_state(\"total\", default=tensor(0.0), dist_reduce_fx=\"sum\")\n\n def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore\n \"\"\"Update state with predictions and targets.\"\"\"\n sum_abs_per_error, num_obs = _symmetric_mean_absolute_percentage_error_update(preds, target)\n\n self.sum_abs_per_error += sum_abs_per_error\n self.total += num_obs\n\n def compute(self) -> Tensor:\n \"\"\"Computes mean absolute percentage error over state.\"\"\"\n return _symmetric_mean_absolute_percentage_error_compute(self.sum_abs_per_error, self.total)\n", "path": "src/torchmetrics/regression/symmetric_mape.py"}]}
num_tokens_prompt: 1,661
num_tokens_diff: 241
problem_id: gh_patches_debug_11153
source: rasdani/github-patches
task_type: git_diff
in_source_id: open-mmlab__mmsegmentation-19
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> FileNotFoundError: [Errno 2] No such file or directory: 'VOCdevkit/VOCaug/dataset/trainval.txt' https://github.com/open-mmlab/mmsegmentation/blob/1c3f54765981ba352d4cf6582edb1c8915e51d71/tools/convert_datasets/voc_aug.py#L53 Directory `VOCdevkit/VOCaug/dataset` does not exist `trainval.txt`, `trainval.txt` is the merger of `train.txt` and `val.txt`? </issue> <code> [start of tools/convert_datasets/voc_aug.py] 1 import argparse 2 import os.path as osp 3 from functools import partial 4 5 import mmcv 6 import numpy as np 7 from PIL import Image 8 from scipy.io import loadmat 9 10 AUG_LEN = 10582 11 12 13 def convert_mat(mat_file, in_dir, out_dir): 14 data = loadmat(osp.join(in_dir, mat_file)) 15 mask = data['GTcls'][0]['Segmentation'][0].astype(np.uint8) 16 seg_filename = osp.join(out_dir, mat_file.replace('.mat', '.png')) 17 Image.fromarray(mask).save(seg_filename, 'PNG') 18 19 20 def generate_aug_list(merged_list, excluded_list): 21 return list(set(merged_list) - set(excluded_list)) 22 23 24 def parse_args(): 25 parser = argparse.ArgumentParser( 26 description='Convert PASCAL VOC annotations to mmsegmentation format') 27 parser.add_argument('devkit_path', help='pascal voc devkit path') 28 parser.add_argument('aug_path', help='pascal voc aug path') 29 parser.add_argument('-o', '--out_dir', help='output path') 30 parser.add_argument( 31 '--nproc', default=1, type=int, help='number of process') 32 args = parser.parse_args() 33 return args 34 35 36 def main(): 37 args = parse_args() 38 devkit_path = args.devkit_path 39 aug_path = args.aug_path 40 nproc = args.nproc 41 if args.out_dir is None: 42 out_dir = osp.join(devkit_path, 'VOC2012', 'SegmentationClassAug') 43 else: 44 out_dir = args.out_dir 45 mmcv.mkdir_or_exist(out_dir) 46 in_dir = osp.join(aug_path, 'dataset', 'cls') 47 48 mmcv.track_parallel_progress( 49 partial(convert_mat, in_dir=in_dir, out_dir=out_dir), 50 list(mmcv.scandir(in_dir, suffix='.mat')), 51 nproc=nproc) 52 53 with open(osp.join(aug_path, 'dataset', 'trainval.txt')) as f: 54 full_aug_list = [line.strip() for line in f] 55 with open( 56 osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 57 'train.txt')) as f: 58 ori_train_list = [line.strip() for line in f] 59 with open( 60 osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 61 'val.txt')) as f: 62 val_list = [line.strip() for line in f] 63 64 aug_train_list = generate_aug_list(ori_train_list + full_aug_list, 65 val_list) 66 assert len(aug_train_list) == AUG_LEN, 'len(aug_train_list) != {}'.format( 67 AUG_LEN) 68 69 with open( 70 osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 71 'trainaug.txt'), 'w') as f: 72 f.writelines(line + '\n' for line in aug_train_list) 73 74 aug_list = generate_aug_list(full_aug_list, ori_train_list + val_list) 75 assert len(aug_list) == AUG_LEN - len( 76 ori_train_list), 'len(aug_list) != {}'.format(AUG_LEN - 77 len(ori_train_list)) 78 with open( 79 osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'aug.txt'), 80 'w') as f: 81 f.writelines(line + '\n' for line in aug_list) 82 83 print('Done!') 84 85 86 if __name__ == '__main__': 87 main() 88 [end of tools/convert_datasets/voc_aug.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tools/convert_datasets/voc_aug.py b/tools/convert_datasets/voc_aug.py --- a/tools/convert_datasets/voc_aug.py +++ b/tools/convert_datasets/voc_aug.py @@ -50,8 +50,12 @@ list(mmcv.scandir(in_dir, suffix='.mat')), nproc=nproc) - with open(osp.join(aug_path, 'dataset', 'trainval.txt')) as f: - full_aug_list = [line.strip() for line in f] + full_aug_list = [] + with open(osp.join(aug_path, 'dataset', 'train.txt')) as f: + full_aug_list += [line.strip() for line in f] + with open(osp.join(aug_path, 'dataset', 'val.txt')) as f: + full_aug_list += [line.strip() for line in f] + with open( osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'train.txt')) as f:
{"golden_diff": "diff --git a/tools/convert_datasets/voc_aug.py b/tools/convert_datasets/voc_aug.py\n--- a/tools/convert_datasets/voc_aug.py\n+++ b/tools/convert_datasets/voc_aug.py\n@@ -50,8 +50,12 @@\n list(mmcv.scandir(in_dir, suffix='.mat')),\n nproc=nproc)\n \n- with open(osp.join(aug_path, 'dataset', 'trainval.txt')) as f:\n- full_aug_list = [line.strip() for line in f]\n+ full_aug_list = []\n+ with open(osp.join(aug_path, 'dataset', 'train.txt')) as f:\n+ full_aug_list += [line.strip() for line in f]\n+ with open(osp.join(aug_path, 'dataset', 'val.txt')) as f:\n+ full_aug_list += [line.strip() for line in f]\n+\n with open(\n osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation',\n 'train.txt')) as f:\n", "issue": "FileNotFoundError: [Errno 2] No such file or directory: 'VOCdevkit/VOCaug/dataset/trainval.txt'\nhttps://github.com/open-mmlab/mmsegmentation/blob/1c3f54765981ba352d4cf6582edb1c8915e51d71/tools/convert_datasets/voc_aug.py#L53\r\n\r\nDirectory `VOCdevkit/VOCaug/dataset` does not exist `trainval.txt`, `trainval.txt` is the merger of `train.txt` and `val.txt`?\n", "before_files": [{"content": "import argparse\nimport os.path as osp\nfrom functools import partial\n\nimport mmcv\nimport numpy as np\nfrom PIL import Image\nfrom scipy.io import loadmat\n\nAUG_LEN = 10582\n\n\ndef convert_mat(mat_file, in_dir, out_dir):\n data = loadmat(osp.join(in_dir, mat_file))\n mask = data['GTcls'][0]['Segmentation'][0].astype(np.uint8)\n seg_filename = osp.join(out_dir, mat_file.replace('.mat', '.png'))\n Image.fromarray(mask).save(seg_filename, 'PNG')\n\n\ndef generate_aug_list(merged_list, excluded_list):\n return list(set(merged_list) - set(excluded_list))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Convert PASCAL VOC annotations to mmsegmentation format')\n parser.add_argument('devkit_path', help='pascal voc devkit path')\n parser.add_argument('aug_path', help='pascal voc aug path')\n parser.add_argument('-o', '--out_dir', help='output path')\n parser.add_argument(\n '--nproc', default=1, type=int, help='number of process')\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n devkit_path = args.devkit_path\n aug_path = args.aug_path\n nproc = args.nproc\n if args.out_dir is None:\n out_dir = osp.join(devkit_path, 'VOC2012', 'SegmentationClassAug')\n else:\n out_dir = args.out_dir\n mmcv.mkdir_or_exist(out_dir)\n in_dir = osp.join(aug_path, 'dataset', 'cls')\n\n mmcv.track_parallel_progress(\n partial(convert_mat, in_dir=in_dir, out_dir=out_dir),\n list(mmcv.scandir(in_dir, suffix='.mat')),\n nproc=nproc)\n\n with open(osp.join(aug_path, 'dataset', 'trainval.txt')) as f:\n full_aug_list = [line.strip() for line in f]\n with open(\n osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation',\n 'train.txt')) as f:\n ori_train_list = [line.strip() for line in f]\n with open(\n osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation',\n 'val.txt')) as f:\n val_list = [line.strip() for line in f]\n\n aug_train_list = generate_aug_list(ori_train_list + full_aug_list,\n val_list)\n assert len(aug_train_list) == AUG_LEN, 'len(aug_train_list) != {}'.format(\n AUG_LEN)\n\n with open(\n osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation',\n 'trainaug.txt'), 'w') as f:\n f.writelines(line + '\\n' for line in aug_train_list)\n\n aug_list = generate_aug_list(full_aug_list, ori_train_list + val_list)\n assert len(aug_list) == AUG_LEN - len(\n ori_train_list), 'len(aug_list) != {}'.format(AUG_LEN -\n len(ori_train_list))\n with open(\n 
osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'aug.txt'),\n 'w') as f:\n f.writelines(line + '\\n' for line in aug_list)\n\n print('Done!')\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/convert_datasets/voc_aug.py"}]}
num_tokens_prompt: 1,607
num_tokens_diff: 228
problem_id: gh_patches_debug_27576
source: rasdani/github-patches
task_type: git_diff
in_source_id: pytorch__text-192
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> len of iterator incorrect for dynamic batching The `__len__` method of `Iterator` (defined [here](https://github.com/pytorch/text/blob/master/torchtext/data/iterator.py#L157)) returns a wrong result for dynamic batching (i.e. if [batch_size_fn](https://github.com/pytorch/text/blob/master/torchtext/data/iterator.py#L50) is not None). For example if we set `batch_size_fn` to ``` lambda x, n, b: b + len(x.text) ``` there might be more or fewer batches in the dataset than the `__len__` outputs. </issue> <code> [start of torchtext/data/iterator.py] 1 from __future__ import division 2 3 import math 4 import random 5 from contextlib import contextmanager 6 from copy import deepcopy 7 8 from .batch import Batch 9 from .dataset import Dataset 10 11 12 class RandomShuffler(object): 13 """Use random functions while keeping track of the random state to make it 14 reproducible and deterministic.""" 15 16 def __init__(self, random_state=None): 17 self._random_state = random_state 18 if self._random_state is None: 19 self._random_state = random.getstate() 20 21 @contextmanager 22 def use_internal_state(self): 23 """Use a specific RNG state.""" 24 old_state = random.getstate() 25 random.setstate(self._random_state) 26 yield 27 self._random_state = random.getstate() 28 random.setstate(old_state) 29 30 @property 31 def random_state(self): 32 return deepcopy(self._random_state) 33 34 @random_state.setter 35 def random_state(self, s): 36 self._random_state = s 37 38 def __call__(self, data): 39 """Shuffle and return a new list.""" 40 with self.use_internal_state(): 41 return random.sample(data, len(data)) 42 43 44 class Iterator(object): 45 """Defines an iterator that loads batches of data from a Dataset. 46 47 Attributes: 48 dataset: The Dataset object to load Examples from. 49 batch_size: Batch size. 50 batch_size_fn: Function of three arguments (new example to add, current 51 count of examples in the batch, and current effective batch size) 52 that returns the new effective batch size resulting from adding 53 that example to a batch. This is useful for dynamic batching, where 54 this function would add to the current effective batch size the 55 number of tokens in the new example. 56 sort_key: A key to use for sorting examples in order to batch together 57 examples with similar lengths and minimize padding. The sort_key 58 provided to the Iterator constructor overrides the sort_key 59 attribute of the Dataset, or defers to it if None. 60 train: Whether the iterator represents a train set. 61 repeat: Whether to repeat the iterator for multiple epochs. 62 shuffle: Whether to shuffle examples between epochs. 63 sort: Whether to sort examples according to self.sort_key. 64 Note that repeat, shuffle, and sort default to train, train, and 65 (not train). 66 sort_within_batch: Whether to sort (in descending order according to 67 self.sort_key) within each batch. If None, defaults to self.sort. 68 If self.sort is True and this is False, the batch is left in the 69 original (ascending) sorted order. 70 device: Device to create batches on. Use -1 for CPU and None for the 71 currently active GPU device. 
72 """ 73 74 def __init__(self, dataset, batch_size, sort_key=None, device=None, 75 batch_size_fn=lambda new, count, sofar: count, train=True, 76 repeat=None, shuffle=None, sort=None, 77 sort_within_batch=None): 78 self.batch_size, self.train, self.dataset = batch_size, train, dataset 79 self.batch_size_fn = batch_size_fn 80 self.iterations = 0 81 self.repeat = train if repeat is None else repeat 82 self.shuffle = train if shuffle is None else shuffle 83 self.sort = not train if sort is None else sort 84 if sort_within_batch is None: 85 self.sort_within_batch = self.sort 86 else: 87 self.sort_within_batch = sort_within_batch 88 if sort_key is None: 89 self.sort_key = dataset.sort_key 90 else: 91 self.sort_key = sort_key 92 self.device = device 93 94 self.random_shuffler = RandomShuffler() 95 96 # For state loading/saving only 97 self._iterations_this_epoch = 0 98 self._random_state_this_epoch = None 99 self._restored_from_state = False 100 101 @classmethod 102 def splits(cls, datasets, batch_sizes=None, **kwargs): 103 """Create Iterator objects for multiple splits of a dataset. 104 105 Arguments: 106 datasets: Tuple of Dataset objects corresponding to the splits. The 107 first such object should be the train set. 108 batch_sizes: Tuple of batch sizes to use for the different splits, 109 or None to use the same batch_size for all splits. 110 Remaining keyword arguments: Passed to the constructor of the 111 iterator class being used. 112 """ 113 if batch_sizes is None: 114 batch_sizes = [kwargs.pop('batch_size')] * len(datasets) 115 ret = [] 116 for i in range(len(datasets)): 117 train = i == 0 118 ret.append(cls( 119 datasets[i], batch_size=batch_sizes[i], train=train, **kwargs)) 120 return tuple(ret) 121 122 def data(self): 123 """Return the examples in the dataset in order, sorted, or shuffled.""" 124 if self.sort: 125 xs = sorted(self.dataset, key=self.sort_key) 126 elif self.shuffle: 127 xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))] 128 else: 129 xs = self.dataset 130 return xs 131 132 def init_epoch(self): 133 """Set up the batch generator for a new epoch.""" 134 135 if self._restored_from_state: 136 self.random_shuffler.random_state = self._random_state_this_epoch 137 else: 138 self._random_state_this_epoch = self.random_shuffler.random_state 139 140 self.create_batches() 141 142 if self._restored_from_state: 143 self._restored_from_state = False 144 else: 145 self._iterations_this_epoch = 0 146 147 if not self.repeat: 148 self.iterations = 0 149 150 def create_batches(self): 151 self.batches = batch(self.data(), self.batch_size, self.batch_size_fn) 152 153 @property 154 def epoch(self): 155 return self.iterations / len(self) 156 157 def __len__(self): 158 return math.ceil(len(self.dataset) / self.batch_size) 159 160 def __iter__(self): 161 while True: 162 self.init_epoch() 163 for idx, minibatch in enumerate(self.batches): 164 # fast-forward if loaded from state 165 if self._iterations_this_epoch > idx: 166 continue 167 self.iterations += 1 168 self._iterations_this_epoch += 1 169 if self.sort_within_batch: 170 # NOTE: `rnn.pack_padded_sequence` requires that a minibatch 171 # be sorted by decreasing order, which requires reversing 172 # relative to typical sort keys 173 if self.sort: 174 minibatch.reverse() 175 else: 176 minibatch.sort(key=self.sort_key, reverse=True) 177 yield Batch(minibatch, self.dataset, self.device, 178 self.train) 179 if not self.repeat: 180 raise StopIteration 181 182 def state_dict(self): 183 return { 184 "iterations": 
self.iterations, 185 "iterations_this_epoch": self._iterations_this_epoch, 186 "random_state_this_epoch": self._random_state_this_epoch} 187 188 def load_state_dict(self, state_dict): 189 self.iterations = state_dict["iterations"] 190 self._iterations_this_epoch = state_dict["iterations_this_epoch"] 191 self._random_state_this_epoch = state_dict["random_state_this_epoch"] 192 self._restored_from_state = True 193 194 195 class BPTTIterator(Iterator): 196 """Defines an iterator for language modeling tasks that use BPTT. 197 198 Provides contiguous streams of examples together with targets that are 199 one timestep further forward, for language modeling training with 200 backpropagation through time (BPTT). Expects a Dataset with a single 201 example and a single field called 'text' and produces Batches with text and 202 target attributes. 203 204 Attributes: 205 dataset: The Dataset object to load Examples from. 206 batch_size: Batch size. 207 bptt_len: Length of sequences for backpropagation through time. 208 sort_key: A key to use for sorting examples in order to batch together 209 examples with similar lengths and minimize padding. The sort_key 210 provided to the Iterator constructor overrides the sort_key 211 attribute of the Dataset, or defers to it if None. 212 train: Whether the iterator represents a train set. 213 repeat: Whether to repeat the iterator for multiple epochs. 214 shuffle: Whether to shuffle examples between epochs. 215 sort: Whether to sort examples according to self.sort_key. 216 Note that repeat, shuffle, and sort default to train, train, and 217 (not train). 218 device: Device to create batches on. Use -1 for CPU and None for the 219 currently active GPU device. 220 """ 221 222 def __init__(self, dataset, batch_size, bptt_len, **kwargs): 223 self.bptt_len = bptt_len 224 super(BPTTIterator, self).__init__(dataset, batch_size, **kwargs) 225 226 def __len__(self): 227 return math.ceil((len(self.dataset[0].text) / self.batch_size - 1) / 228 self.bptt_len) 229 230 def __iter__(self): 231 text = self.dataset[0].text 232 TEXT = self.dataset.fields['text'] 233 TEXT.eos_token = None 234 text = text + ([TEXT.pad_token] * int(math.ceil(len(text) / self.batch_size) * 235 self.batch_size - len(text))) 236 data = TEXT.numericalize( 237 [text], device=self.device, train=self.train) 238 data = data.view(self.batch_size, -1).t().contiguous() 239 dataset = Dataset(examples=self.dataset.examples, fields=[ 240 ('text', TEXT), ('target', TEXT)]) 241 while True: 242 for i in range(0, len(self) * self.bptt_len, self.bptt_len): 243 seq_len = min(self.bptt_len, len(data) - i - 1) 244 yield Batch.fromvars( 245 dataset, self.batch_size, train=self.train, 246 text=data[i:i + seq_len], 247 target=data[i + 1:i + 1 + seq_len]) 248 if not self.repeat: 249 raise StopIteration 250 251 252 class BucketIterator(Iterator): 253 """Defines an iterator that batches examples of similar lengths together. 254 255 Minimizes amount of padding needed while producing freshly shuffled 256 batches for each new epoch. See pool for the bucketing procedure used. 
257 """ 258 259 def create_batches(self): 260 if self.sort: 261 self.batches = batch(self.data(), self.batch_size, 262 self.batch_size_fn) 263 else: 264 self.batches = pool(self.data(), self.batch_size, 265 self.sort_key, self.batch_size_fn, 266 random_shuffler=self.random_shuffler) 267 268 269 def batch(data, batch_size, batch_size_fn=lambda new, count, sofar: count): 270 """Yield elements from data in chunks of batch_size.""" 271 minibatch, size_so_far = [], 0 272 for ex in data: 273 minibatch.append(ex) 274 size_so_far = batch_size_fn(ex, len(minibatch), size_so_far) 275 if size_so_far == batch_size: 276 yield minibatch 277 minibatch, size_so_far = [], 0 278 elif size_so_far > batch_size: 279 yield minibatch[:-1] 280 minibatch, size_so_far = minibatch[-1:], batch_size_fn(ex, 1, 0) 281 if minibatch: 282 yield minibatch 283 284 285 def pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count, 286 random_shuffler=None): 287 """Sort within buckets, then batch, then shuffle batches. 288 289 Partitions data into chunks of size 100*batch_size, sorts examples within 290 each chunk using sort_key, then batch these examples and shuffle the 291 batches. 292 """ 293 if random_shuffler is None: 294 random_shuffler = random.shuffle 295 for p in batch(data, batch_size * 100, batch_size_fn): 296 p_batch = batch(sorted(p, key=key), batch_size, batch_size_fn) 297 for b in random_shuffler(list(p_batch)): 298 yield b 299 [end of torchtext/data/iterator.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchtext/data/iterator.py b/torchtext/data/iterator.py --- a/torchtext/data/iterator.py +++ b/torchtext/data/iterator.py @@ -72,7 +72,7 @@ """ def __init__(self, dataset, batch_size, sort_key=None, device=None, - batch_size_fn=lambda new, count, sofar: count, train=True, + batch_size_fn=None, train=True, repeat=None, shuffle=None, sort=None, sort_within_batch=None): self.batch_size, self.train, self.dataset = batch_size, train, dataset @@ -155,6 +155,8 @@ return self.iterations / len(self) def __len__(self): + if self.batch_size_fn is not None: + raise NotImplementedError return math.ceil(len(self.dataset) / self.batch_size) def __iter__(self): @@ -266,8 +268,11 @@ random_shuffler=self.random_shuffler) -def batch(data, batch_size, batch_size_fn=lambda new, count, sofar: count): +def batch(data, batch_size, batch_size_fn=None): """Yield elements from data in chunks of batch_size.""" + if batch_size_fn is None: + def batch_size_fn(new, count, sofar): + return count minibatch, size_so_far = [], 0 for ex in data: minibatch.append(ex)
{"golden_diff": "diff --git a/torchtext/data/iterator.py b/torchtext/data/iterator.py\n--- a/torchtext/data/iterator.py\n+++ b/torchtext/data/iterator.py\n@@ -72,7 +72,7 @@\n \"\"\"\n \n def __init__(self, dataset, batch_size, sort_key=None, device=None,\n- batch_size_fn=lambda new, count, sofar: count, train=True,\n+ batch_size_fn=None, train=True,\n repeat=None, shuffle=None, sort=None,\n sort_within_batch=None):\n self.batch_size, self.train, self.dataset = batch_size, train, dataset\n@@ -155,6 +155,8 @@\n return self.iterations / len(self)\n \n def __len__(self):\n+ if self.batch_size_fn is not None:\n+ raise NotImplementedError\n return math.ceil(len(self.dataset) / self.batch_size)\n \n def __iter__(self):\n@@ -266,8 +268,11 @@\n random_shuffler=self.random_shuffler)\n \n \n-def batch(data, batch_size, batch_size_fn=lambda new, count, sofar: count):\n+def batch(data, batch_size, batch_size_fn=None):\n \"\"\"Yield elements from data in chunks of batch_size.\"\"\"\n+ if batch_size_fn is None:\n+ def batch_size_fn(new, count, sofar):\n+ return count\n minibatch, size_so_far = [], 0\n for ex in data:\n minibatch.append(ex)\n", "issue": "len of iterator incorrect for dynamic batching\nThe `__len__` method of `Iterator` (defined [here](https://github.com/pytorch/text/blob/master/torchtext/data/iterator.py#L157)) returns a wrong result for dynamic batching (i.e. if [batch_size_fn](https://github.com/pytorch/text/blob/master/torchtext/data/iterator.py#L50) is not None).\r\n\r\nFor example if we set `batch_size_fn` to\r\n```\r\nlambda x, n, b: b + len(x.text)\r\n```\r\nthere might be more or fewer batches in the dataset than the `__len__` outputs. \n", "before_files": [{"content": "from __future__ import division\n\nimport math\nimport random\nfrom contextlib import contextmanager\nfrom copy import deepcopy\n\nfrom .batch import Batch\nfrom .dataset import Dataset\n\n\nclass RandomShuffler(object):\n \"\"\"Use random functions while keeping track of the random state to make it\n reproducible and deterministic.\"\"\"\n\n def __init__(self, random_state=None):\n self._random_state = random_state\n if self._random_state is None:\n self._random_state = random.getstate()\n\n @contextmanager\n def use_internal_state(self):\n \"\"\"Use a specific RNG state.\"\"\"\n old_state = random.getstate()\n random.setstate(self._random_state)\n yield\n self._random_state = random.getstate()\n random.setstate(old_state)\n\n @property\n def random_state(self):\n return deepcopy(self._random_state)\n\n @random_state.setter\n def random_state(self, s):\n self._random_state = s\n\n def __call__(self, data):\n \"\"\"Shuffle and return a new list.\"\"\"\n with self.use_internal_state():\n return random.sample(data, len(data))\n\n\nclass Iterator(object):\n \"\"\"Defines an iterator that loads batches of data from a Dataset.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n batch_size_fn: Function of three arguments (new example to add, current\n count of examples in the batch, and current effective batch size)\n that returns the new effective batch size resulting from adding\n that example to a batch. This is useful for dynamic batching, where\n this function would add to the current effective batch size the\n number of tokens in the new example.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. 
The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n sort_within_batch: Whether to sort (in descending order according to\n self.sort_key) within each batch. If None, defaults to self.sort.\n If self.sort is True and this is False, the batch is left in the\n original (ascending) sorted order.\n device: Device to create batches on. Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, sort_key=None, device=None,\n batch_size_fn=lambda new, count, sofar: count, train=True,\n repeat=None, shuffle=None, sort=None,\n sort_within_batch=None):\n self.batch_size, self.train, self.dataset = batch_size, train, dataset\n self.batch_size_fn = batch_size_fn\n self.iterations = 0\n self.repeat = train if repeat is None else repeat\n self.shuffle = train if shuffle is None else shuffle\n self.sort = not train if sort is None else sort\n if sort_within_batch is None:\n self.sort_within_batch = self.sort\n else:\n self.sort_within_batch = sort_within_batch\n if sort_key is None:\n self.sort_key = dataset.sort_key\n else:\n self.sort_key = sort_key\n self.device = device\n\n self.random_shuffler = RandomShuffler()\n\n # For state loading/saving only\n self._iterations_this_epoch = 0\n self._random_state_this_epoch = None\n self._restored_from_state = False\n\n @classmethod\n def splits(cls, datasets, batch_sizes=None, **kwargs):\n \"\"\"Create Iterator objects for multiple splits of a dataset.\n\n Arguments:\n datasets: Tuple of Dataset objects corresponding to the splits. 
The\n first such object should be the train set.\n batch_sizes: Tuple of batch sizes to use for the different splits,\n or None to use the same batch_size for all splits.\n Remaining keyword arguments: Passed to the constructor of the\n iterator class being used.\n \"\"\"\n if batch_sizes is None:\n batch_sizes = [kwargs.pop('batch_size')] * len(datasets)\n ret = []\n for i in range(len(datasets)):\n train = i == 0\n ret.append(cls(\n datasets[i], batch_size=batch_sizes[i], train=train, **kwargs))\n return tuple(ret)\n\n def data(self):\n \"\"\"Return the examples in the dataset in order, sorted, or shuffled.\"\"\"\n if self.sort:\n xs = sorted(self.dataset, key=self.sort_key)\n elif self.shuffle:\n xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]\n else:\n xs = self.dataset\n return xs\n\n def init_epoch(self):\n \"\"\"Set up the batch generator for a new epoch.\"\"\"\n\n if self._restored_from_state:\n self.random_shuffler.random_state = self._random_state_this_epoch\n else:\n self._random_state_this_epoch = self.random_shuffler.random_state\n\n self.create_batches()\n\n if self._restored_from_state:\n self._restored_from_state = False\n else:\n self._iterations_this_epoch = 0\n\n if not self.repeat:\n self.iterations = 0\n\n def create_batches(self):\n self.batches = batch(self.data(), self.batch_size, self.batch_size_fn)\n\n @property\n def epoch(self):\n return self.iterations / len(self)\n\n def __len__(self):\n return math.ceil(len(self.dataset) / self.batch_size)\n\n def __iter__(self):\n while True:\n self.init_epoch()\n for idx, minibatch in enumerate(self.batches):\n # fast-forward if loaded from state\n if self._iterations_this_epoch > idx:\n continue\n self.iterations += 1\n self._iterations_this_epoch += 1\n if self.sort_within_batch:\n # NOTE: `rnn.pack_padded_sequence` requires that a minibatch\n # be sorted by decreasing order, which requires reversing\n # relative to typical sort keys\n if self.sort:\n minibatch.reverse()\n else:\n minibatch.sort(key=self.sort_key, reverse=True)\n yield Batch(minibatch, self.dataset, self.device,\n self.train)\n if not self.repeat:\n raise StopIteration\n\n def state_dict(self):\n return {\n \"iterations\": self.iterations,\n \"iterations_this_epoch\": self._iterations_this_epoch,\n \"random_state_this_epoch\": self._random_state_this_epoch}\n\n def load_state_dict(self, state_dict):\n self.iterations = state_dict[\"iterations\"]\n self._iterations_this_epoch = state_dict[\"iterations_this_epoch\"]\n self._random_state_this_epoch = state_dict[\"random_state_this_epoch\"]\n self._restored_from_state = True\n\n\nclass BPTTIterator(Iterator):\n \"\"\"Defines an iterator for language modeling tasks that use BPTT.\n\n Provides contiguous streams of examples together with targets that are\n one timestep further forward, for language modeling training with\n backpropagation through time (BPTT). Expects a Dataset with a single\n example and a single field called 'text' and produces Batches with text and\n target attributes.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n bptt_len: Length of sequences for backpropagation through time.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. 
The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n device: Device to create batches on. Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, bptt_len, **kwargs):\n self.bptt_len = bptt_len\n super(BPTTIterator, self).__init__(dataset, batch_size, **kwargs)\n\n def __len__(self):\n return math.ceil((len(self.dataset[0].text) / self.batch_size - 1) /\n self.bptt_len)\n\n def __iter__(self):\n text = self.dataset[0].text\n TEXT = self.dataset.fields['text']\n TEXT.eos_token = None\n text = text + ([TEXT.pad_token] * int(math.ceil(len(text) / self.batch_size) *\n self.batch_size - len(text)))\n data = TEXT.numericalize(\n [text], device=self.device, train=self.train)\n data = data.view(self.batch_size, -1).t().contiguous()\n dataset = Dataset(examples=self.dataset.examples, fields=[\n ('text', TEXT), ('target', TEXT)])\n while True:\n for i in range(0, len(self) * self.bptt_len, self.bptt_len):\n seq_len = min(self.bptt_len, len(data) - i - 1)\n yield Batch.fromvars(\n dataset, self.batch_size, train=self.train,\n text=data[i:i + seq_len],\n target=data[i + 1:i + 1 + seq_len])\n if not self.repeat:\n raise StopIteration\n\n\nclass BucketIterator(Iterator):\n \"\"\"Defines an iterator that batches examples of similar lengths together.\n\n Minimizes amount of padding needed while producing freshly shuffled\n batches for each new epoch. See pool for the bucketing procedure used.\n \"\"\"\n\n def create_batches(self):\n if self.sort:\n self.batches = batch(self.data(), self.batch_size,\n self.batch_size_fn)\n else:\n self.batches = pool(self.data(), self.batch_size,\n self.sort_key, self.batch_size_fn,\n random_shuffler=self.random_shuffler)\n\n\ndef batch(data, batch_size, batch_size_fn=lambda new, count, sofar: count):\n \"\"\"Yield elements from data in chunks of batch_size.\"\"\"\n minibatch, size_so_far = [], 0\n for ex in data:\n minibatch.append(ex)\n size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif size_so_far > batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[-1:], batch_size_fn(ex, 1, 0)\n if minibatch:\n yield minibatch\n\n\ndef pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count,\n random_shuffler=None):\n \"\"\"Sort within buckets, then batch, then shuffle batches.\n\n Partitions data into chunks of size 100*batch_size, sorts examples within\n each chunk using sort_key, then batch these examples and shuffle the\n batches.\n \"\"\"\n if random_shuffler is None:\n random_shuffler = random.shuffle\n for p in batch(data, batch_size * 100, batch_size_fn):\n p_batch = batch(sorted(p, key=key), batch_size, batch_size_fn)\n for b in random_shuffler(list(p_batch)):\n yield b\n", "path": "torchtext/data/iterator.py"}]}
num_tokens_prompt: 4,020
num_tokens_diff: 328
problem_id: gh_patches_debug_2582
source: rasdani/github-patches
task_type: git_diff
in_source_id: azavea__raster-vision-1586
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Same explanation for SlidingWindowGeoDataset and RandomWindowGeoDataset ## 📚 Documentation <!-- A clear and concise description of what content in https://docs.rastervision.io/ is an issue.--> > The SlidingWindowGeoDataset allows reading the scene by sampling random window sizes and locations. This description is same to explained both SlidingWindowGeoDataset and RandomWindowGeoDataset. This can be found here: https://docs.rastervision.io/en/latest/tutorials/sampling_training_data.html </issue> <code> [start of rastervision_core/rastervision/core/data/class_config.py] 1 from typing import List, Optional, Tuple, Union 2 3 from rastervision.pipeline.config import (Config, register_config, ConfigError, 4 Field, validator) 5 from rastervision.core.data.utils import color_to_triple, normalize_color 6 7 DEFAULT_NULL_CLASS_NAME = 'null' 8 DEFAULT_NULL_CLASS_COLOR = 'black' 9 10 11 @register_config('class_config') 12 class ClassConfig(Config): 13 """Configures the class names that are being predicted.""" 14 names: List[str] = Field( 15 ..., 16 description='Names of classes. The i-th class in this list will have ' 17 'class ID = i.') 18 colors: Optional[List[Union[str, Tuple]]] = Field( 19 None, 20 description= 21 ('Colors used to visualize classes. Can be color strings accepted by ' 22 'matplotlib or RGB tuples. If None, a random color will be auto-generated ' 23 'for each class.')) 24 null_class: Optional[str] = Field( 25 None, 26 description='Optional name of class in `names` to use as the null ' 27 'class. This is used in semantic segmentation to represent the label ' 28 'for imagery pixels that are NODATA or that are missing a label. ' 29 f'If None and the class names include "{DEFAULT_NULL_CLASS_NAME}", ' 30 'it will automatically be used as the null class. If None, and this ' 31 'Config is part of a SemanticSegmentationConfig, a null class will be ' 32 'added automatically.') 33 34 @validator('colors', always=True) 35 def validate_colors(cls, v: Optional[List[Union[str, Tuple]]], 36 values: dict) -> Optional[List[Union[str, Tuple]]]: 37 """Compare length w/ names. Also auto-generate if not specified.""" 38 class_names = values['names'] 39 class_colors = v 40 if class_colors is None: 41 class_colors = [color_to_triple() for _ in class_names] 42 elif len(class_names) != len(class_colors): 43 raise ConfigError(f'len(class_names) ({len(class_names)}) != ' 44 f'len(class_colors) ({len(class_colors)})\n' 45 f'class_names: {class_names}\n' 46 f'class_colors: {class_colors}') 47 return class_colors 48 49 @validator('null_class', always=True) 50 def validate_null_class(cls, v: Optional[str], 51 values: dict) -> Optional[str]: 52 """Check if in names. 
If 'null' in names, use it as null class.""" 53 names = values['names'] 54 if v is None: 55 if DEFAULT_NULL_CLASS_NAME in names: 56 v = DEFAULT_NULL_CLASS_NAME 57 else: 58 if v not in names: 59 raise ConfigError( 60 f'The null_class, "{v}", must be in list of class names.') 61 62 # edge case 63 default_null_class_in_names = (DEFAULT_NULL_CLASS_NAME in names) 64 null_class_neq_default = (v != DEFAULT_NULL_CLASS_NAME) 65 if default_null_class_in_names and null_class_neq_default: 66 raise ConfigError( 67 f'"{DEFAULT_NULL_CLASS_NAME}" is in names but the ' 68 f'specified null_class is something else ("{v}").') 69 return v 70 71 def get_class_id(self, name: str) -> int: 72 return self.names.index(name) 73 74 def get_name(self, id: int) -> str: 75 return self.names[id] 76 77 @property 78 def null_class_id(self) -> int: 79 if self.null_class is None: 80 raise ValueError('null_class is not set') 81 return self.get_class_id(self.null_class) 82 83 def get_color_to_class_id(self) -> dict: 84 return dict([(self.colors[i], i) for i in range(len(self.colors))]) 85 86 def ensure_null_class(self) -> None: 87 """Add a null class if one isn't set. This method is idempotent.""" 88 if self.null_class is not None: 89 return 90 91 null_class_name = DEFAULT_NULL_CLASS_NAME 92 null_class_color = DEFAULT_NULL_CLASS_COLOR 93 94 # This might seeem redundant given the null class validator above, but 95 # is actually important. Sometimes there can be multiple ClassConfig 96 # instances that reference the same list objects for names and colors 97 # (not clear why this happens). This means that 98 # each ensure_null_class() call will add to names and colors in each 99 # copy of ClassConfig but only set its own null_class, which makes this 100 # method() non-idempotent. 101 if null_class_name in self.names: 102 self.null_class = null_class_name 103 return 104 105 # use random color if default color is already taken 106 null_class_color_triple = color_to_triple(null_class_color) 107 all_color_triples = [ 108 color_to_triple(c) if isinstance(c, str) else c 109 for c in self.colors 110 ] 111 if null_class_color_triple in all_color_triples: 112 null_class_color = color_to_triple() 113 114 self.names.append(null_class_name) 115 self.colors.append(null_class_color) 116 self.null_class = null_class_name 117 118 def __len__(self) -> int: 119 return len(self.names) 120 121 @property 122 def color_triples(self) -> List[Tuple[float, float, float]]: 123 color_triples = [normalize_color(c) for c in self.colors] 124 return color_triples 125 [end of rastervision_core/rastervision/core/data/class_config.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rastervision_core/rastervision/core/data/class_config.py b/rastervision_core/rastervision/core/data/class_config.py --- a/rastervision_core/rastervision/core/data/class_config.py +++ b/rastervision_core/rastervision/core/data/class_config.py @@ -120,5 +120,6 @@ @property def color_triples(self) -> List[Tuple[float, float, float]]: + """Class colors in a normalized form.""" color_triples = [normalize_color(c) for c in self.colors] return color_triples
{"golden_diff": "diff --git a/rastervision_core/rastervision/core/data/class_config.py b/rastervision_core/rastervision/core/data/class_config.py\n--- a/rastervision_core/rastervision/core/data/class_config.py\n+++ b/rastervision_core/rastervision/core/data/class_config.py\n@@ -120,5 +120,6 @@\n \n @property\n def color_triples(self) -> List[Tuple[float, float, float]]:\n+ \"\"\"Class colors in a normalized form.\"\"\"\n color_triples = [normalize_color(c) for c in self.colors]\n return color_triples\n", "issue": "Same explanation for SlidingWindowGeoDataset and RandomWindowGeoDataset\n## \ud83d\udcda Documentation\r\n\r\n<!-- A clear and concise description of what content in https://docs.rastervision.io/ is an issue.-->\r\n\r\n> The SlidingWindowGeoDataset allows reading the scene by sampling random window sizes and locations.\r\n\r\nThis description is same to explained both SlidingWindowGeoDataset and RandomWindowGeoDataset. This can be found here: https://docs.rastervision.io/en/latest/tutorials/sampling_training_data.html\n", "before_files": [{"content": "from typing import List, Optional, Tuple, Union\n\nfrom rastervision.pipeline.config import (Config, register_config, ConfigError,\n Field, validator)\nfrom rastervision.core.data.utils import color_to_triple, normalize_color\n\nDEFAULT_NULL_CLASS_NAME = 'null'\nDEFAULT_NULL_CLASS_COLOR = 'black'\n\n\n@register_config('class_config')\nclass ClassConfig(Config):\n \"\"\"Configures the class names that are being predicted.\"\"\"\n names: List[str] = Field(\n ...,\n description='Names of classes. The i-th class in this list will have '\n 'class ID = i.')\n colors: Optional[List[Union[str, Tuple]]] = Field(\n None,\n description=\n ('Colors used to visualize classes. Can be color strings accepted by '\n 'matplotlib or RGB tuples. If None, a random color will be auto-generated '\n 'for each class.'))\n null_class: Optional[str] = Field(\n None,\n description='Optional name of class in `names` to use as the null '\n 'class. This is used in semantic segmentation to represent the label '\n 'for imagery pixels that are NODATA or that are missing a label. '\n f'If None and the class names include \"{DEFAULT_NULL_CLASS_NAME}\", '\n 'it will automatically be used as the null class. If None, and this '\n 'Config is part of a SemanticSegmentationConfig, a null class will be '\n 'added automatically.')\n\n @validator('colors', always=True)\n def validate_colors(cls, v: Optional[List[Union[str, Tuple]]],\n values: dict) -> Optional[List[Union[str, Tuple]]]:\n \"\"\"Compare length w/ names. Also auto-generate if not specified.\"\"\"\n class_names = values['names']\n class_colors = v\n if class_colors is None:\n class_colors = [color_to_triple() for _ in class_names]\n elif len(class_names) != len(class_colors):\n raise ConfigError(f'len(class_names) ({len(class_names)}) != '\n f'len(class_colors) ({len(class_colors)})\\n'\n f'class_names: {class_names}\\n'\n f'class_colors: {class_colors}')\n return class_colors\n\n @validator('null_class', always=True)\n def validate_null_class(cls, v: Optional[str],\n values: dict) -> Optional[str]:\n \"\"\"Check if in names. 
If 'null' in names, use it as null class.\"\"\"\n names = values['names']\n if v is None:\n if DEFAULT_NULL_CLASS_NAME in names:\n v = DEFAULT_NULL_CLASS_NAME\n else:\n if v not in names:\n raise ConfigError(\n f'The null_class, \"{v}\", must be in list of class names.')\n\n # edge case\n default_null_class_in_names = (DEFAULT_NULL_CLASS_NAME in names)\n null_class_neq_default = (v != DEFAULT_NULL_CLASS_NAME)\n if default_null_class_in_names and null_class_neq_default:\n raise ConfigError(\n f'\"{DEFAULT_NULL_CLASS_NAME}\" is in names but the '\n f'specified null_class is something else (\"{v}\").')\n return v\n\n def get_class_id(self, name: str) -> int:\n return self.names.index(name)\n\n def get_name(self, id: int) -> str:\n return self.names[id]\n\n @property\n def null_class_id(self) -> int:\n if self.null_class is None:\n raise ValueError('null_class is not set')\n return self.get_class_id(self.null_class)\n\n def get_color_to_class_id(self) -> dict:\n return dict([(self.colors[i], i) for i in range(len(self.colors))])\n\n def ensure_null_class(self) -> None:\n \"\"\"Add a null class if one isn't set. This method is idempotent.\"\"\"\n if self.null_class is not None:\n return\n\n null_class_name = DEFAULT_NULL_CLASS_NAME\n null_class_color = DEFAULT_NULL_CLASS_COLOR\n\n # This might seeem redundant given the null class validator above, but\n # is actually important. Sometimes there can be multiple ClassConfig\n # instances that reference the same list objects for names and colors\n # (not clear why this happens). This means that\n # each ensure_null_class() call will add to names and colors in each\n # copy of ClassConfig but only set its own null_class, which makes this\n # method() non-idempotent.\n if null_class_name in self.names:\n self.null_class = null_class_name\n return\n\n # use random color if default color is already taken\n null_class_color_triple = color_to_triple(null_class_color)\n all_color_triples = [\n color_to_triple(c) if isinstance(c, str) else c\n for c in self.colors\n ]\n if null_class_color_triple in all_color_triples:\n null_class_color = color_to_triple()\n\n self.names.append(null_class_name)\n self.colors.append(null_class_color)\n self.null_class = null_class_name\n\n def __len__(self) -> int:\n return len(self.names)\n\n @property\n def color_triples(self) -> List[Tuple[float, float, float]]:\n color_triples = [normalize_color(c) for c in self.colors]\n return color_triples\n", "path": "rastervision_core/rastervision/core/data/class_config.py"}]}
num_tokens_prompt: 2,077
num_tokens_diff: 136
problem_id: gh_patches_debug_67232
source: rasdani/github-patches
task_type: git_diff
in_source_id: pypi__warehouse-8550
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> /pypi/{package}/{version}/json: yanking an older release updates latest release's yanked_reason field **Describe the bug** Yanking an older version of a package leads to unexpected side-effect for latest version's package info provided via the JSON endpoint. In particular, the `yanked_reason` field gets updated. **Expected behavior** When yanking a version of a package, no other verision's `yanked_reason` field should be updated. **To Reproduce** 1. Create new package on test.pypi.org 2. Release version `0.2.0`. 3. Release version `0.3.0`. 4. Yank version `0.2.0`. 5. Check json endpoint of package version `0.3.0`. ```console $ curl -sL https://test.pypi.org/pypi/abn-test-rss-yank/0.3.0/json | jq '.info.yanked' false $ curl -sL https://test.pypi.org/pypi/abn-test-rss-yank/0.3.0/json | jq '.info.yanked_reason' "Testing Yank" ``` **My Platform** N/A **Additional context** * Test package: https://test.pypi.org/project/abn-test-rss-yank/ </issue> <code> [start of warehouse/legacy/api/json.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 from collections import OrderedDict 14 15 from pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound 16 from pyramid.view import view_config 17 from sqlalchemy.orm import Load 18 from sqlalchemy.orm.exc import NoResultFound 19 20 from warehouse.cache.http import cache_control 21 from warehouse.cache.origin import origin_cache 22 from warehouse.packaging.models import File, Project, Release 23 24 # Generate appropriate CORS headers for the JSON endpoint. 25 # We want to allow Cross-Origin requests here so that users can interact 26 # with these endpoints via XHR/Fetch APIs in the browser. 27 _CORS_HEADERS = { 28 "Access-Control-Allow-Origin": "*", 29 "Access-Control-Allow-Headers": ", ".join( 30 [ 31 "Content-Type", 32 "If-Match", 33 "If-Modified-Since", 34 "If-None-Match", 35 "If-Unmodified-Since", 36 ] 37 ), 38 "Access-Control-Allow-Methods": "GET", 39 "Access-Control-Max-Age": "86400", # 1 day. 
40 "Access-Control-Expose-Headers": ", ".join(["X-PyPI-Last-Serial"]), 41 } 42 43 _CACHE_DECORATOR = [ 44 cache_control(15 * 60), # 15 minutes 45 origin_cache( 46 1 * 24 * 60 * 60, # 1 day 47 stale_while_revalidate=5 * 60, # 5 minutes 48 stale_if_error=1 * 24 * 60 * 60, # 1 day 49 ), 50 ] 51 52 53 @view_config( 54 route_name="legacy.api.json.project", 55 context=Project, 56 renderer="json", 57 decorator=_CACHE_DECORATOR, 58 ) 59 def json_project(project, request): 60 if project.name != request.matchdict.get("name", project.name): 61 return HTTPMovedPermanently( 62 request.current_route_path(name=project.name), headers=_CORS_HEADERS 63 ) 64 65 try: 66 release = ( 67 request.db.query(Release) 68 .filter(Release.project == project, Release.yanked.is_(False)) 69 .order_by(Release.is_prerelease.nullslast(), Release._pypi_ordering.desc()) 70 .limit(1) 71 .one() 72 ) 73 except NoResultFound: 74 return HTTPNotFound(headers=_CORS_HEADERS) 75 76 return json_release(release, request) 77 78 79 @view_config( 80 route_name="legacy.api.json.project_slash", 81 context=Project, 82 decorator=_CACHE_DECORATOR, 83 ) 84 def json_project_slash(project, request): 85 return HTTPMovedPermanently( 86 # Respond with redirect to url without trailing slash 87 request.route_path("legacy.api.json.project", name=project.name), 88 headers=_CORS_HEADERS, 89 ) 90 91 92 @view_config( 93 route_name="legacy.api.json.release", 94 context=Release, 95 renderer="json", 96 decorator=_CACHE_DECORATOR, 97 ) 98 def json_release(release, request): 99 project = release.project 100 101 if project.name != request.matchdict.get("name", project.name): 102 return HTTPMovedPermanently( 103 request.current_route_path(name=project.name), headers=_CORS_HEADERS 104 ) 105 106 # Apply CORS headers. 107 request.response.headers.update(_CORS_HEADERS) 108 109 # Get the latest serial number for this project. 110 request.response.headers["X-PyPI-Last-Serial"] = str(project.last_serial) 111 112 # Get all of the releases and files for this project. 113 release_files = ( 114 request.db.query(Release, File) 115 .options( 116 Load(Release).load_only( 117 "version", "requires_python", "yanked", "yanked_reason" 118 ) 119 ) 120 .outerjoin(File) 121 .filter(Release.project == project) 122 .order_by(Release._pypi_ordering.desc(), File.filename) 123 .all() 124 ) 125 126 # Map our releases + files into a dictionary that maps each release to a 127 # list of all its files. 128 releases = {} 129 for r, file_ in release_files: 130 files = releases.setdefault(r, []) 131 if file_ is not None: 132 files.append(file_) 133 134 # Serialize our database objects to match the way that PyPI legacy 135 # presented this data. 136 releases = { 137 r.version: [ 138 { 139 "filename": f.filename, 140 "packagetype": f.packagetype, 141 "python_version": f.python_version, 142 "has_sig": f.has_signature, 143 "comment_text": f.comment_text, 144 "md5_digest": f.md5_digest, 145 "digests": {"md5": f.md5_digest, "sha256": f.sha256_digest}, 146 "size": f.size, 147 # TODO: Remove this once we've had a long enough time with it 148 # here to consider it no longer in use. 
149 "downloads": -1, 150 "upload_time": f.upload_time.strftime("%Y-%m-%dT%H:%M:%S"), 151 "upload_time_iso_8601": f.upload_time.isoformat() + "Z", 152 "url": request.route_url("packaging.file", path=f.path), 153 "requires_python": r.requires_python if r.requires_python else None, 154 "yanked": r.yanked, 155 "yanked_reason": r.yanked_reason or None, 156 } 157 for f in fs 158 ] 159 for r, fs in releases.items() 160 } 161 162 return { 163 "info": { 164 "name": project.name, 165 "version": release.version, 166 "summary": release.summary, 167 "description_content_type": release.description.content_type, 168 "description": release.description.raw, 169 "keywords": release.keywords, 170 "license": release.license, 171 "classifiers": list(release.classifiers), 172 "author": release.author, 173 "author_email": release.author_email, 174 "maintainer": release.maintainer, 175 "maintainer_email": release.maintainer_email, 176 "requires_python": release.requires_python, 177 "platform": release.platform, 178 "downloads": {"last_day": -1, "last_week": -1, "last_month": -1}, 179 "package_url": request.route_url("packaging.project", name=project.name), 180 "project_url": request.route_url("packaging.project", name=project.name), 181 "project_urls": OrderedDict(release.urls) if release.urls else None, 182 "release_url": request.route_url( 183 "packaging.release", name=project.name, version=release.version 184 ), 185 "requires_dist": ( 186 list(release.requires_dist) if release.requires_dist else None 187 ), 188 "docs_url": project.documentation_url, 189 "bugtrack_url": None, 190 "home_page": release.home_page, 191 "download_url": release.download_url, 192 "yanked": release.yanked, 193 "yanked_reason": r.yanked_reason or None, 194 }, 195 "urls": releases[release.version], 196 "releases": releases, 197 "last_serial": project.last_serial, 198 } 199 200 201 @view_config( 202 route_name="legacy.api.json.release_slash", 203 context=Release, 204 decorator=_CACHE_DECORATOR, 205 ) 206 def json_release_slash(release, request): 207 return HTTPMovedPermanently( 208 # Respond with redirect to url without trailing slash 209 request.route_path( 210 "legacy.api.json.release", 211 name=release.project.name, 212 version=release.version, 213 ), 214 headers=_CORS_HEADERS, 215 ) 216 [end of warehouse/legacy/api/json.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/warehouse/legacy/api/json.py b/warehouse/legacy/api/json.py --- a/warehouse/legacy/api/json.py +++ b/warehouse/legacy/api/json.py @@ -190,7 +190,7 @@ "home_page": release.home_page, "download_url": release.download_url, "yanked": release.yanked, - "yanked_reason": r.yanked_reason or None, + "yanked_reason": release.yanked_reason or None, }, "urls": releases[release.version], "releases": releases,
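A note on why this one-line change is enough: at the point of the buggy read, `r` is a leftover loop variable from the earlier `for r, file_ in release_files:` loop, and because that query orders by `Release._pypi_ordering.desc()`, `r` ends up bound to the oldest release rather than the one being rendered. The sketch below reproduces the hazard with plain dicts; the data values are illustrative stand-ins, not warehouse ORM objects.

```python
# Illustrative data only: plain dicts standing in for Release rows.
release_files = [
    {"version": "0.3.0", "yanked": False, "yanked_reason": None},            # newest, iterated first
    {"version": "0.2.0", "yanked": True,  "yanked_reason": "Testing Yank"},  # oldest, iterated last
]

for r in release_files:   # `r` stays bound to the last item after the loop
    pass                  # (the real view builds its files mapping here)

requested = release_files[0]  # the view is rendering 0.3.0
info = {
    "version": requested["version"],
    "yanked": requested["yanked"],         # False, correct
    "yanked_reason": r["yanked_reason"],   # "Testing Yank", leaked from 0.2.0
}
# The patch reads the requested release instead, i.e. requested["yanked_reason"] -> None.
```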
{"golden_diff": "diff --git a/warehouse/legacy/api/json.py b/warehouse/legacy/api/json.py\n--- a/warehouse/legacy/api/json.py\n+++ b/warehouse/legacy/api/json.py\n@@ -190,7 +190,7 @@\n \"home_page\": release.home_page,\n \"download_url\": release.download_url,\n \"yanked\": release.yanked,\n- \"yanked_reason\": r.yanked_reason or None,\n+ \"yanked_reason\": release.yanked_reason or None,\n },\n \"urls\": releases[release.version],\n \"releases\": releases,\n", "issue": "/pypi/{package}/{version}/json: yanking an older release updates latest release's yanked_reason field\n**Describe the bug**\r\nYanking an older version of a package leads to unexpected side-effect for latest version's package info provided via the JSON endpoint. In particular, the `yanked_reason` field gets updated.\r\n\r\n**Expected behavior**\r\nWhen yanking a version of a package, no other verision's `yanked_reason` field should be updated.\r\n\r\n**To Reproduce**\r\n1. Create new package on test.pypi.org\r\n2. Release version `0.2.0`.\r\n3. Release version `0.3.0`.\r\n4. Yank version `0.2.0`.\r\n5. Check json endpoint of package version `0.3.0`.\r\n\r\n```console\r\n$ curl -sL https://test.pypi.org/pypi/abn-test-rss-yank/0.3.0/json | jq '.info.yanked'\r\nfalse\r\n$ curl -sL https://test.pypi.org/pypi/abn-test-rss-yank/0.3.0/json | jq '.info.yanked_reason'\r\n\"Testing Yank\"\r\n```\r\n\r\n**My Platform**\r\nN/A\r\n\r\n**Additional context**\r\n* Test package: https://test.pypi.org/project/abn-test-rss-yank/\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import OrderedDict\n\nfrom pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm import Load\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import File, Project, Release\n\n# Generate appropriate CORS headers for the JSON endpoint.\n# We want to allow Cross-Origin requests here so that users can interact\n# with these endpoints via XHR/Fetch APIs in the browser.\n_CORS_HEADERS = {\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Headers\": \", \".join(\n [\n \"Content-Type\",\n \"If-Match\",\n \"If-Modified-Since\",\n \"If-None-Match\",\n \"If-Unmodified-Since\",\n ]\n ),\n \"Access-Control-Allow-Methods\": \"GET\",\n \"Access-Control-Max-Age\": \"86400\", # 1 day.\n \"Access-Control-Expose-Headers\": \", \".join([\"X-PyPI-Last-Serial\"]),\n}\n\n_CACHE_DECORATOR = [\n cache_control(15 * 60), # 15 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n]\n\n\n@view_config(\n route_name=\"legacy.api.json.project\",\n context=Project,\n renderer=\"json\",\n decorator=_CACHE_DECORATOR,\n)\ndef json_project(project, request):\n if project.name != request.matchdict.get(\"name\", project.name):\n return 
HTTPMovedPermanently(\n request.current_route_path(name=project.name), headers=_CORS_HEADERS\n )\n\n try:\n release = (\n request.db.query(Release)\n .filter(Release.project == project, Release.yanked.is_(False))\n .order_by(Release.is_prerelease.nullslast(), Release._pypi_ordering.desc())\n .limit(1)\n .one()\n )\n except NoResultFound:\n return HTTPNotFound(headers=_CORS_HEADERS)\n\n return json_release(release, request)\n\n\n@view_config(\n route_name=\"legacy.api.json.project_slash\",\n context=Project,\n decorator=_CACHE_DECORATOR,\n)\ndef json_project_slash(project, request):\n return HTTPMovedPermanently(\n # Respond with redirect to url without trailing slash\n request.route_path(\"legacy.api.json.project\", name=project.name),\n headers=_CORS_HEADERS,\n )\n\n\n@view_config(\n route_name=\"legacy.api.json.release\",\n context=Release,\n renderer=\"json\",\n decorator=_CACHE_DECORATOR,\n)\ndef json_release(release, request):\n project = release.project\n\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name), headers=_CORS_HEADERS\n )\n\n # Apply CORS headers.\n request.response.headers.update(_CORS_HEADERS)\n\n # Get the latest serial number for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the releases and files for this project.\n release_files = (\n request.db.query(Release, File)\n .options(\n Load(Release).load_only(\n \"version\", \"requires_python\", \"yanked\", \"yanked_reason\"\n )\n )\n .outerjoin(File)\n .filter(Release.project == project)\n .order_by(Release._pypi_ordering.desc(), File.filename)\n .all()\n )\n\n # Map our releases + files into a dictionary that maps each release to a\n # list of all its files.\n releases = {}\n for r, file_ in release_files:\n files = releases.setdefault(r, [])\n if file_ is not None:\n files.append(file_)\n\n # Serialize our database objects to match the way that PyPI legacy\n # presented this data.\n releases = {\n r.version: [\n {\n \"filename\": f.filename,\n \"packagetype\": f.packagetype,\n \"python_version\": f.python_version,\n \"has_sig\": f.has_signature,\n \"comment_text\": f.comment_text,\n \"md5_digest\": f.md5_digest,\n \"digests\": {\"md5\": f.md5_digest, \"sha256\": f.sha256_digest},\n \"size\": f.size,\n # TODO: Remove this once we've had a long enough time with it\n # here to consider it no longer in use.\n \"downloads\": -1,\n \"upload_time\": f.upload_time.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n \"upload_time_iso_8601\": f.upload_time.isoformat() + \"Z\",\n \"url\": request.route_url(\"packaging.file\", path=f.path),\n \"requires_python\": r.requires_python if r.requires_python else None,\n \"yanked\": r.yanked,\n \"yanked_reason\": r.yanked_reason or None,\n }\n for f in fs\n ]\n for r, fs in releases.items()\n }\n\n return {\n \"info\": {\n \"name\": project.name,\n \"version\": release.version,\n \"summary\": release.summary,\n \"description_content_type\": release.description.content_type,\n \"description\": release.description.raw,\n \"keywords\": release.keywords,\n \"license\": release.license,\n \"classifiers\": list(release.classifiers),\n \"author\": release.author,\n \"author_email\": release.author_email,\n \"maintainer\": release.maintainer,\n \"maintainer_email\": release.maintainer_email,\n \"requires_python\": release.requires_python,\n \"platform\": release.platform,\n \"downloads\": {\"last_day\": -1, \"last_week\": -1, \"last_month\": -1},\n 
\"package_url\": request.route_url(\"packaging.project\", name=project.name),\n \"project_url\": request.route_url(\"packaging.project\", name=project.name),\n \"project_urls\": OrderedDict(release.urls) if release.urls else None,\n \"release_url\": request.route_url(\n \"packaging.release\", name=project.name, version=release.version\n ),\n \"requires_dist\": (\n list(release.requires_dist) if release.requires_dist else None\n ),\n \"docs_url\": project.documentation_url,\n \"bugtrack_url\": None,\n \"home_page\": release.home_page,\n \"download_url\": release.download_url,\n \"yanked\": release.yanked,\n \"yanked_reason\": r.yanked_reason or None,\n },\n \"urls\": releases[release.version],\n \"releases\": releases,\n \"last_serial\": project.last_serial,\n }\n\n\n@view_config(\n route_name=\"legacy.api.json.release_slash\",\n context=Release,\n decorator=_CACHE_DECORATOR,\n)\ndef json_release_slash(release, request):\n return HTTPMovedPermanently(\n # Respond with redirect to url without trailing slash\n request.route_path(\n \"legacy.api.json.release\",\n name=release.project.name,\n version=release.version,\n ),\n headers=_CORS_HEADERS,\n )\n", "path": "warehouse/legacy/api/json.py"}]}
num_tokens_prompt: 3,071
num_tokens_diff: 127

problem_id: gh_patches_debug_10054
source: rasdani/github-patches
task_type: git_diff
in_source_id: acl-org__acl-anthology-990
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Recaser bug: adding fixed-case inside tex-math markup `<tex-math><fixed-case>O</fixed-case>(<fixed-case>M</fixed-case>(n^2))</tex-math>` caused the build to fail in #892 </issue> <code> [start of bin/fixedcase/protect.py] 1 #!/usr/bin/env python3 2 3 # protect.py <infile> <outfile> 4 # looks for file "truelist" in current dir 5 6 # cd data/xml 7 # for i in *xml ; do (cd ../../tools/fixedcase/ ; python3 ./protect.py ../../data/xml/$i /tmp/$i ; echo $i ); done > log 8 9 10 import lxml.etree as ET 11 import os 12 import sys 13 import copy 14 import itertools 15 import inspect 16 17 from collections import defaultdict 18 19 if __name__ == "__main__": 20 from common import * 21 else: 22 from .common import * 23 24 # recursive helper called by protect 25 # protect text of "node", including children, and tails of children 26 def protect_recurse(node, recased): 27 if node.tag == "fixed-case": # already protected 28 newnode = copy.deepcopy(node) # don't need to modify descendents 29 newnode.tail = None # tail will be protected by caller 30 return newnode 31 newnode = ET.Element(node.tag, node.attrib) 32 33 def process(text, rc): 34 i = 0 35 for upper, chars in itertools.groupby(rc[: len(text)], lambda c: c.isupper()): 36 charstr = "".join(chars) 37 if upper: 38 p = ET.Element("fixed-case") 39 p.text = charstr 40 newnode.append(p) 41 else: 42 append_text(newnode, text[i : i + len(charstr)]) 43 44 assert text[i : i + len(charstr)].lower() == charstr.lower(), ( 45 i, 46 text, 47 charstr, 48 ) 49 i += len(charstr) 50 51 if node.text: 52 process(node.text, recased) 53 recased = recased[len(node.text) :] 54 for child in node: 55 protected_child = protect_recurse(child, recased) 56 recased = recased[len(get_text(protected_child)) :] 57 newnode.append(protected_child) 58 if child.tail: 59 process(child.tail, recased) 60 recased = recased[len(child.tail) :] 61 62 return newnode 63 64 65 def protect(node): 66 rawtext = get_text(node).strip() 67 recased = None 68 if rawtext.lower() in special_titles: 69 recased = special_titles[rawtext.lower()] 70 else: 71 text = tokenize(rawtext) 72 fixed = fixedcase_title( 73 text, 74 truelist=truelist, 75 phrase_truelist=phrase_truelist, 76 amodifiers=amodifiers, 77 ndescriptors=ndescriptors, 78 ) 79 if any(fixed): 80 # Generate the recased string so we know where to look in the XML 81 # to apply fixed-case 82 recasedtoks = [(w if b else w.lower()) for w, b in zip(text, fixed)] 83 recased = TreebankWordDetokenizer().detokenize(recasedtoks) 84 # PTB (de)tokenizer doesn't think of hyphens as separate tokens, 85 # so we need to manually detokenize them. 
86 # Assuming the only edits that need to be made are adding/deleting 87 # spaces, the following will work: 88 i = 0 89 while i < len(rawtext): 90 # scan rawtext from left to right and adjust recased by adding/removing 91 # spaces until it matches 92 t = rawtext[i] 93 assert i < len(recased), ((i, t), rawtext, recased) 94 c = recased[i] 95 if t.isspace() and not c.isspace(): # may be ' ' or '\n' 96 # add space to recased 97 recased = recased[:i] + t + recased[i:] 98 i += 1 99 elif c.isspace() and not t.isspace(): 100 # remove space from recased 101 recased = recased[:i] + recased[i + 1 :] 102 # don't increment i 103 elif t != c and t.isspace() and c.isspace(): 104 recased = recased[:i] + t + recased[i + 1 :] 105 i += 1 106 else: 107 assert t == c or t.lower() == c.lower(), ( 108 (i, t, c), 109 rawtext, 110 recased, 111 text, 112 ) 113 i += 1 114 if len(recased) > len(rawtext): 115 recased = recased[: len(rawtext)] 116 assert rawtext.lower() == recased.lower(), (rawtext, recased) 117 118 if recased: 119 newnode = protect_recurse(node, recased) 120 newnode.tail = node.tail # tail of top level is not protected 121 replace_node(node, newnode) 122 123 124 # Read in the truelist (list of words that should always be protected) 125 truelist, phrase_truelist, special_titles, amodifiers, ndescriptors = load_lists() 126 127 if __name__ == "__main__": 128 infile, outfile = sys.argv[1:] 129 130 tree = ET.parse(infile) 131 if not tree.getroot().tail: 132 tree.getroot().tail = "\n" 133 for paper in tree.getroot().findall(".//paper"): 134 for title in paper.xpath("./title|./booktitle"): 135 protect(title) 136 tree.write(outfile, encoding="UTF-8", xml_declaration=True) 137 [end of bin/fixedcase/protect.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bin/fixedcase/protect.py b/bin/fixedcase/protect.py --- a/bin/fixedcase/protect.py +++ b/bin/fixedcase/protect.py @@ -24,7 +24,7 @@ # recursive helper called by protect # protect text of "node", including children, and tails of children def protect_recurse(node, recased): - if node.tag == "fixed-case": # already protected + if node.tag in ("fixed-case", "tex-math"): # already protected text, or math newnode = copy.deepcopy(node) # don't need to modify descendents newnode.tail = None # tail will be protected by caller return newnode
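For context on why the guard is needed: once the recaser decides a title needs protection, it splits the recased text into runs of uppercase and non-uppercase characters with `itertools.groupby` and wraps each uppercase run in `<fixed-case>`. Applied inside `<tex-math>` content such as `O(M(n^2))`, that would wrap the `O` and the `M`, which is presumably the invalid markup that broke the build; the patch short-circuits `tex-math` nodes the same way already-protected `fixed-case` nodes are skipped. A rough, self-contained illustration of the grouping step follows; `uppercase_runs` is a hypothetical helper name, not anthology code.

```python
import itertools

def uppercase_runs(recased):
    """Split a string into (is_upper, run) chunks, mirroring the grouping
    protect_recurse() uses to decide what to wrap in <fixed-case>."""
    return [
        (upper, "".join(chars))
        for upper, chars in itertools.groupby(recased, lambda c: c.isupper())
    ]

print(uppercase_runs("O(M(n^2))"))
# [(True, 'O'), (False, '('), (True, 'M'), (False, '(n^2))')]
```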
{"golden_diff": "diff --git a/bin/fixedcase/protect.py b/bin/fixedcase/protect.py\n--- a/bin/fixedcase/protect.py\n+++ b/bin/fixedcase/protect.py\n@@ -24,7 +24,7 @@\n # recursive helper called by protect\n # protect text of \"node\", including children, and tails of children\n def protect_recurse(node, recased):\n- if node.tag == \"fixed-case\": # already protected\n+ if node.tag in (\"fixed-case\", \"tex-math\"): # already protected text, or math\n newnode = copy.deepcopy(node) # don't need to modify descendents\n newnode.tail = None # tail will be protected by caller\n return newnode\n", "issue": "Recaser bug: adding fixed-case inside tex-math markup\n`<tex-math><fixed-case>O</fixed-case>(<fixed-case>M</fixed-case>(n^2))</tex-math>` caused the build to fail in #892\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# protect.py <infile> <outfile>\n# looks for file \"truelist\" in current dir\n\n# cd data/xml\n# for i in *xml ; do (cd ../../tools/fixedcase/ ; python3 ./protect.py ../../data/xml/$i /tmp/$i ; echo $i ); done > log\n\n\nimport lxml.etree as ET\nimport os\nimport sys\nimport copy\nimport itertools\nimport inspect\n\nfrom collections import defaultdict\n\nif __name__ == \"__main__\":\n from common import *\nelse:\n from .common import *\n\n# recursive helper called by protect\n# protect text of \"node\", including children, and tails of children\ndef protect_recurse(node, recased):\n if node.tag == \"fixed-case\": # already protected\n newnode = copy.deepcopy(node) # don't need to modify descendents\n newnode.tail = None # tail will be protected by caller\n return newnode\n newnode = ET.Element(node.tag, node.attrib)\n\n def process(text, rc):\n i = 0\n for upper, chars in itertools.groupby(rc[: len(text)], lambda c: c.isupper()):\n charstr = \"\".join(chars)\n if upper:\n p = ET.Element(\"fixed-case\")\n p.text = charstr\n newnode.append(p)\n else:\n append_text(newnode, text[i : i + len(charstr)])\n\n assert text[i : i + len(charstr)].lower() == charstr.lower(), (\n i,\n text,\n charstr,\n )\n i += len(charstr)\n\n if node.text:\n process(node.text, recased)\n recased = recased[len(node.text) :]\n for child in node:\n protected_child = protect_recurse(child, recased)\n recased = recased[len(get_text(protected_child)) :]\n newnode.append(protected_child)\n if child.tail:\n process(child.tail, recased)\n recased = recased[len(child.tail) :]\n\n return newnode\n\n\ndef protect(node):\n rawtext = get_text(node).strip()\n recased = None\n if rawtext.lower() in special_titles:\n recased = special_titles[rawtext.lower()]\n else:\n text = tokenize(rawtext)\n fixed = fixedcase_title(\n text,\n truelist=truelist,\n phrase_truelist=phrase_truelist,\n amodifiers=amodifiers,\n ndescriptors=ndescriptors,\n )\n if any(fixed):\n # Generate the recased string so we know where to look in the XML\n # to apply fixed-case\n recasedtoks = [(w if b else w.lower()) for w, b in zip(text, fixed)]\n recased = TreebankWordDetokenizer().detokenize(recasedtoks)\n # PTB (de)tokenizer doesn't think of hyphens as separate tokens,\n # so we need to manually detokenize them.\n # Assuming the only edits that need to be made are adding/deleting\n # spaces, the following will work:\n i = 0\n while i < len(rawtext):\n # scan rawtext from left to right and adjust recased by adding/removing\n # spaces until it matches\n t = rawtext[i]\n assert i < len(recased), ((i, t), rawtext, recased)\n c = recased[i]\n if t.isspace() and not c.isspace(): # may be ' ' or '\\n'\n # add space to recased\n recased = 
recased[:i] + t + recased[i:]\n i += 1\n elif c.isspace() and not t.isspace():\n # remove space from recased\n recased = recased[:i] + recased[i + 1 :]\n # don't increment i\n elif t != c and t.isspace() and c.isspace():\n recased = recased[:i] + t + recased[i + 1 :]\n i += 1\n else:\n assert t == c or t.lower() == c.lower(), (\n (i, t, c),\n rawtext,\n recased,\n text,\n )\n i += 1\n if len(recased) > len(rawtext):\n recased = recased[: len(rawtext)]\n assert rawtext.lower() == recased.lower(), (rawtext, recased)\n\n if recased:\n newnode = protect_recurse(node, recased)\n newnode.tail = node.tail # tail of top level is not protected\n replace_node(node, newnode)\n\n\n# Read in the truelist (list of words that should always be protected)\ntruelist, phrase_truelist, special_titles, amodifiers, ndescriptors = load_lists()\n\nif __name__ == \"__main__\":\n infile, outfile = sys.argv[1:]\n\n tree = ET.parse(infile)\n if not tree.getroot().tail:\n tree.getroot().tail = \"\\n\"\n for paper in tree.getroot().findall(\".//paper\"):\n for title in paper.xpath(\"./title|./booktitle\"):\n protect(title)\n tree.write(outfile, encoding=\"UTF-8\", xml_declaration=True)\n", "path": "bin/fixedcase/protect.py"}]}
num_tokens_prompt: 2,045
num_tokens_diff: 161

problem_id: gh_patches_debug_47339
source: rasdani/github-patches
task_type: git_diff
in_source_id: enthought__chaco-904
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Wx BitmapFromBufferRGBA deprecated in favor of Bitmap.FromBufferRGBA ``` c:\users\rporuri\work\github\ets\chaco\chaco\tools\toolbars\toolbar_buttons.py:190: wxPyDeprecationWarning: Call to deprecated item BitmapFromBufferRGBA. Use :meth:`wx.Bitmap.FromBufferRGBA` instead. width + 1, height + 1, gc.bmp_array.flatten() ``` Discovered when running `examples/demo/depth.py` </issue> <code> [start of chaco/tools/toolbars/toolbar_buttons.py] 1 # (C) Copyright 2005-2021 Enthought, Inc., Austin, TX 2 # All rights reserved. 3 # 4 # This software is provided without warranty under the terms of the BSD 5 # license included in LICENSE.txt and may be redistributed only under 6 # the conditions described in the aforementioned license. The license 7 # is also available online at http://www.enthought.com/licenses/BSD.txt 8 # 9 # Thanks for using Enthought open source! 10 11 import numpy 12 13 from traits.etsconfig.api import ETSConfig 14 from enable.tools.toolbars.toolbar_buttons import Button 15 from chaco.tools.zoom_tool import ZoomTool 16 from chaco.plot_graphics_context import PlotGraphicsContext 17 from kiva.image import Image 18 from pyface.image_resource import ImageResource 19 from pyface.api import FileDialog, OK, error 20 from traits.api import ( 21 Instance, 22 Str, 23 Property, 24 cached_property, 25 List, 26 Int, 27 Enum, 28 ) 29 30 31 class ToolbarButton(Button): 32 image = Str() 33 _image = Instance(Image) 34 35 color = "black" 36 37 width = Property(Int, observe="label, image") 38 height = Property(Int, observe="label, image") 39 40 # bounds are used for hit testing 41 bounds = Property(List, observe="label, image") 42 43 def __init__(self, *args, **kw): 44 super().__init__(*args, **kw) 45 46 image_resource = ImageResource(self.image) 47 self._image = Image(image_resource.absolute_path) 48 49 @cached_property 50 def _get_width(self): 51 gc = PlotGraphicsContext((100, 100), dpi=72) 52 gc.set_font(self.label_font) 53 (w, h, descent, leading) = gc.get_full_text_extent(self.label) 54 return max(self._image.width(), w) 55 56 @cached_property 57 def _get_height(self): 58 gc = PlotGraphicsContext((100, 100), dpi=72) 59 gc.set_font(self.label_font) 60 (w, h, descent, leading) = gc.get_full_text_extent(self.label) 61 return self._image.height() + h 62 63 @cached_property 64 def _get_bounds(self): 65 return [self.width, self.height] 66 67 def _draw_actual_button(self, gc): 68 x_offset = self.x + (self.width - self._image.width()) / 2 69 gc.draw_image( 70 self._image, 71 (x_offset, self.y + 2, self._image.width(), self._image.height()), 72 ) 73 74 if self.label is not None and len(self.label) > 0: 75 gc.set_font(self.label_font) 76 77 (w, h, descent, leading) = gc.get_full_text_extent(self.label) 78 if w < self.width: 79 x_offset = self.x + (self.width - w) / 2 80 else: 81 x_offset = self.x 82 83 gc.set_text_position(x_offset, self.y - 8) 84 gc.show_text(self.label) 85 86 87 class IndexAxisLogButton(ToolbarButton): 88 label = "X Log Scale" 89 tooltip = "Change index axis scale" 90 image = "zoom-fit-width" 91 92 def perform(self, event): 93 if self.container.component.index_scale == "linear": 94 self.container.component.index_scale = "log" 95 else: 96 self.container.component.index_scale = "linear" 97 self.container.request_redraw() 98 99 100 class ValueAxisLogButton(ToolbarButton): 101 label = "Y Log Scale" 102 tooltip = "Change value axis scale" 103 image = "zoom-fit-height" 104 105 def 
perform(self, event): 106 if self.container.component.value_scale == "linear": 107 self.container.component.value_scale = "log" 108 else: 109 self.container.component.value_scale = "linear" 110 self.container.request_redraw() 111 112 113 class ZoomResetButton(ToolbarButton): 114 label = "Zoom Reset" 115 tooltip = "Zoom Reset" 116 image = "zoom-original" 117 118 def perform(self, event): 119 plot_component = self.container.component 120 121 for overlay in plot_component.overlays: 122 if isinstance(overlay, ZoomTool): 123 overlay._reset_state_pressed() 124 125 self.container.request_redraw() 126 127 128 class SaveAsButton(ToolbarButton): 129 label = "Save As" 130 tooltip = "Save As" 131 image = "document-save" 132 133 def perform(self, event): 134 135 plot_component = self.container.component 136 137 filter = "PNG file (*.png)|*.png|\nTIFF file (*.tiff)|*.tiff|" 138 dialog = FileDialog(action="save as", wildcard=filter) 139 140 if dialog.open() != OK: 141 return 142 143 # Remove the toolbar before saving the plot, so the output doesn't 144 # include the toolbar. 145 plot_component.remove_toolbar() 146 147 filename = dialog.path 148 149 width, height = plot_component.outer_bounds 150 151 gc = PlotGraphicsContext((width, height), dpi=72) 152 gc.render_component(plot_component) 153 try: 154 gc.save(filename) 155 except KeyError as e: 156 errmsg = ( 157 "The filename must have an extension that matches " 158 "a graphics format, such as '.png' or '.tiff'." 159 ) 160 if str(e.message) != "": 161 errmsg = ( 162 "Unknown filename extension: '%s'\n" % str(e.message) 163 ) + errmsg 164 165 error(None, errmsg, title="Invalid Filename Extension") 166 167 # Restore the toolbar. 168 plot_component.add_toolbar() 169 170 171 class CopyToClipboardButton(ToolbarButton): 172 label = "Copy Image" 173 tooltip = "Copy to the clipboard" 174 image = "edit-copy" 175 176 def perform(self, event): 177 plot_component = self.container.component 178 179 # Remove the toolbar before saving the plot, so the output doesn't 180 # include the toolbar. 181 plot_component.remove_toolbar() 182 183 width, height = plot_component.outer_bounds 184 185 gc = PlotGraphicsContext((width, height), dpi=72) 186 gc.render_component(plot_component) 187 188 if ETSConfig.toolkit == "wx": 189 self._perform_wx(width, height, gc) 190 else: 191 pass 192 193 # Restore the toolbar. 
194 plot_component.add_toolbar() 195 196 def _perform_wx(self, width, height, gc): 197 import wx 198 199 bitmap = wx.BitmapFromBufferRGBA( 200 width + 1, height + 1, gc.bmp_array.flatten() 201 ) 202 data = wx.BitmapDataObject() 203 data.SetBitmap(bitmap) 204 if wx.TheClipboard.Open(): 205 wx.TheClipboard.SetData(data) 206 wx.TheClipboard.Close() 207 else: 208 wx.MessageBox("Unable to open the clipboard.", "Error") 209 210 211 class ExportDataToClipboardButton(ToolbarButton): 212 label = "Copy Data" 213 tooltip = "Copy data to the clipboard" 214 image = "application-vnd-ms-excel" 215 216 orientation = Enum("v", "h") 217 218 def perform(self, event): 219 if ETSConfig.toolkit == "wx": 220 self._perform_wx() 221 elif ETSConfig.toolkit == "qt4": 222 self._perform_qt() 223 else: 224 pass 225 226 def _get_data_from_plots(self): 227 values = [] 228 indices = [] 229 for renderers in self.container.component.plots.values(): 230 for renderer in renderers: 231 indices.append(renderer.index.get_data()) 232 values.append(renderer.value.get_data()) 233 return indices, values 234 235 def _serialize_data(self, indices, values): 236 237 # if all of rows are the same length, use faster algorithms, 238 # otherwise go element by element adding the necessary empty strings 239 if len(set([len(l) for l in values])) == 1: 240 data = [indices[0]] + values 241 if self.orientation == "v": 242 data = numpy.array(data).T.tolist() 243 244 data_str = "" 245 for row in data: 246 data_str += ",".join(["%f" % v for v in row]) + "\n" 247 return data_str 248 249 else: 250 # There might not be a single solution which fits all cases, 251 # so this is left to specific implementations to override 252 raise NotImplementedError() 253 254 def _perform_wx(self): 255 import wx 256 257 indices, values = self._get_data_from_plots() 258 data_str = self._serialize_data(indices, values) 259 data_obj = wx.TextDataObject(data_str) 260 261 if wx.TheClipboard.Open(): 262 wx.TheClipboard.SetData(data_obj) 263 wx.TheClipboard.Close() 264 else: 265 wx.MessageBox("Unable to open the clipboard.", "Error") 266 267 def _perform_qt(self): 268 from pyface.qt import QtGui 269 270 indices, values = self._get_data_from_plots() 271 data_str = self._serialize_data(indices, values) 272 273 QtGui.QApplication.clipboard().setText(data_str) 274 [end of chaco/tools/toolbars/toolbar_buttons.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chaco/tools/toolbars/toolbar_buttons.py b/chaco/tools/toolbars/toolbar_buttons.py --- a/chaco/tools/toolbars/toolbar_buttons.py +++ b/chaco/tools/toolbars/toolbar_buttons.py @@ -196,7 +196,7 @@ def _perform_wx(self, width, height, gc): import wx - bitmap = wx.BitmapFromBufferRGBA( + bitmap = wx.Bitmap.FromBufferRGBA( width + 1, height + 1, gc.bmp_array.flatten() ) data = wx.BitmapDataObject()
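The deprecated and current spellings differ only in where the factory lives. A minimal sketch of the Phoenix-style call the patch switches to is below; the dimensions and buffer contents are placeholders, not values taken from Chaco.

```python
import wx

app = wx.App(False)          # a wx.App should exist before doing bitmap work

width, height = 4, 3
pixel_data = bytes(width * height * 4)   # RGBA buffer, 4 bytes per pixel

# Classic wxPython spelling (emits wxPyDeprecationWarning under Phoenix):
#     bitmap = wx.BitmapFromBufferRGBA(width, height, pixel_data)
# Phoenix spelling used by the fix:
bitmap = wx.Bitmap.FromBufferRGBA(width, height, pixel_data)
print(bitmap.GetSize())      # (4, 3)
```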
{"golden_diff": "diff --git a/chaco/tools/toolbars/toolbar_buttons.py b/chaco/tools/toolbars/toolbar_buttons.py\n--- a/chaco/tools/toolbars/toolbar_buttons.py\n+++ b/chaco/tools/toolbars/toolbar_buttons.py\n@@ -196,7 +196,7 @@\n def _perform_wx(self, width, height, gc):\n import wx\n \n- bitmap = wx.BitmapFromBufferRGBA(\n+ bitmap = wx.Bitmap.FromBufferRGBA(\n width + 1, height + 1, gc.bmp_array.flatten()\n )\n data = wx.BitmapDataObject()\n", "issue": "Wx BitmapFromBufferRGBA deprecated in favor of Bitmap.FromBufferRGBA\n```\r\nc:\\users\\rporuri\\work\\github\\ets\\chaco\\chaco\\tools\\toolbars\\toolbar_buttons.py:190: wxPyDeprecationWarning: Call to deprecated item BitmapFromBufferRGBA. Use :meth:`wx.Bitmap.FromBufferRGBA` instead.\r\n width + 1, height + 1, gc.bmp_array.flatten()\r\n```\r\n\r\nDiscovered when running `examples/demo/depth.py`\n", "before_files": [{"content": "# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX\n# All rights reserved.\n#\n# This software is provided without warranty under the terms of the BSD\n# license included in LICENSE.txt and may be redistributed only under\n# the conditions described in the aforementioned license. The license\n# is also available online at http://www.enthought.com/licenses/BSD.txt\n#\n# Thanks for using Enthought open source!\n\nimport numpy\n\nfrom traits.etsconfig.api import ETSConfig\nfrom enable.tools.toolbars.toolbar_buttons import Button\nfrom chaco.tools.zoom_tool import ZoomTool\nfrom chaco.plot_graphics_context import PlotGraphicsContext\nfrom kiva.image import Image\nfrom pyface.image_resource import ImageResource\nfrom pyface.api import FileDialog, OK, error\nfrom traits.api import (\n Instance,\n Str,\n Property,\n cached_property,\n List,\n Int,\n Enum,\n)\n\n\nclass ToolbarButton(Button):\n image = Str()\n _image = Instance(Image)\n\n color = \"black\"\n\n width = Property(Int, observe=\"label, image\")\n height = Property(Int, observe=\"label, image\")\n\n # bounds are used for hit testing\n bounds = Property(List, observe=\"label, image\")\n\n def __init__(self, *args, **kw):\n super().__init__(*args, **kw)\n\n image_resource = ImageResource(self.image)\n self._image = Image(image_resource.absolute_path)\n\n @cached_property\n def _get_width(self):\n gc = PlotGraphicsContext((100, 100), dpi=72)\n gc.set_font(self.label_font)\n (w, h, descent, leading) = gc.get_full_text_extent(self.label)\n return max(self._image.width(), w)\n\n @cached_property\n def _get_height(self):\n gc = PlotGraphicsContext((100, 100), dpi=72)\n gc.set_font(self.label_font)\n (w, h, descent, leading) = gc.get_full_text_extent(self.label)\n return self._image.height() + h\n\n @cached_property\n def _get_bounds(self):\n return [self.width, self.height]\n\n def _draw_actual_button(self, gc):\n x_offset = self.x + (self.width - self._image.width()) / 2\n gc.draw_image(\n self._image,\n (x_offset, self.y + 2, self._image.width(), self._image.height()),\n )\n\n if self.label is not None and len(self.label) > 0:\n gc.set_font(self.label_font)\n\n (w, h, descent, leading) = gc.get_full_text_extent(self.label)\n if w < self.width:\n x_offset = self.x + (self.width - w) / 2\n else:\n x_offset = self.x\n\n gc.set_text_position(x_offset, self.y - 8)\n gc.show_text(self.label)\n\n\nclass IndexAxisLogButton(ToolbarButton):\n label = \"X Log Scale\"\n tooltip = \"Change index axis scale\"\n image = \"zoom-fit-width\"\n\n def perform(self, event):\n if self.container.component.index_scale == \"linear\":\n self.container.component.index_scale = \"log\"\n 
else:\n self.container.component.index_scale = \"linear\"\n self.container.request_redraw()\n\n\nclass ValueAxisLogButton(ToolbarButton):\n label = \"Y Log Scale\"\n tooltip = \"Change value axis scale\"\n image = \"zoom-fit-height\"\n\n def perform(self, event):\n if self.container.component.value_scale == \"linear\":\n self.container.component.value_scale = \"log\"\n else:\n self.container.component.value_scale = \"linear\"\n self.container.request_redraw()\n\n\nclass ZoomResetButton(ToolbarButton):\n label = \"Zoom Reset\"\n tooltip = \"Zoom Reset\"\n image = \"zoom-original\"\n\n def perform(self, event):\n plot_component = self.container.component\n\n for overlay in plot_component.overlays:\n if isinstance(overlay, ZoomTool):\n overlay._reset_state_pressed()\n\n self.container.request_redraw()\n\n\nclass SaveAsButton(ToolbarButton):\n label = \"Save As\"\n tooltip = \"Save As\"\n image = \"document-save\"\n\n def perform(self, event):\n\n plot_component = self.container.component\n\n filter = \"PNG file (*.png)|*.png|\\nTIFF file (*.tiff)|*.tiff|\"\n dialog = FileDialog(action=\"save as\", wildcard=filter)\n\n if dialog.open() != OK:\n return\n\n # Remove the toolbar before saving the plot, so the output doesn't\n # include the toolbar.\n plot_component.remove_toolbar()\n\n filename = dialog.path\n\n width, height = plot_component.outer_bounds\n\n gc = PlotGraphicsContext((width, height), dpi=72)\n gc.render_component(plot_component)\n try:\n gc.save(filename)\n except KeyError as e:\n errmsg = (\n \"The filename must have an extension that matches \"\n \"a graphics format, such as '.png' or '.tiff'.\"\n )\n if str(e.message) != \"\":\n errmsg = (\n \"Unknown filename extension: '%s'\\n\" % str(e.message)\n ) + errmsg\n\n error(None, errmsg, title=\"Invalid Filename Extension\")\n\n # Restore the toolbar.\n plot_component.add_toolbar()\n\n\nclass CopyToClipboardButton(ToolbarButton):\n label = \"Copy Image\"\n tooltip = \"Copy to the clipboard\"\n image = \"edit-copy\"\n\n def perform(self, event):\n plot_component = self.container.component\n\n # Remove the toolbar before saving the plot, so the output doesn't\n # include the toolbar.\n plot_component.remove_toolbar()\n\n width, height = plot_component.outer_bounds\n\n gc = PlotGraphicsContext((width, height), dpi=72)\n gc.render_component(plot_component)\n\n if ETSConfig.toolkit == \"wx\":\n self._perform_wx(width, height, gc)\n else:\n pass\n\n # Restore the toolbar.\n plot_component.add_toolbar()\n\n def _perform_wx(self, width, height, gc):\n import wx\n\n bitmap = wx.BitmapFromBufferRGBA(\n width + 1, height + 1, gc.bmp_array.flatten()\n )\n data = wx.BitmapDataObject()\n data.SetBitmap(bitmap)\n if wx.TheClipboard.Open():\n wx.TheClipboard.SetData(data)\n wx.TheClipboard.Close()\n else:\n wx.MessageBox(\"Unable to open the clipboard.\", \"Error\")\n\n\nclass ExportDataToClipboardButton(ToolbarButton):\n label = \"Copy Data\"\n tooltip = \"Copy data to the clipboard\"\n image = \"application-vnd-ms-excel\"\n\n orientation = Enum(\"v\", \"h\")\n\n def perform(self, event):\n if ETSConfig.toolkit == \"wx\":\n self._perform_wx()\n elif ETSConfig.toolkit == \"qt4\":\n self._perform_qt()\n else:\n pass\n\n def _get_data_from_plots(self):\n values = []\n indices = []\n for renderers in self.container.component.plots.values():\n for renderer in renderers:\n indices.append(renderer.index.get_data())\n values.append(renderer.value.get_data())\n return indices, values\n\n def _serialize_data(self, indices, values):\n\n # if all of rows 
are the same length, use faster algorithms,\n # otherwise go element by element adding the necessary empty strings\n if len(set([len(l) for l in values])) == 1:\n data = [indices[0]] + values\n if self.orientation == \"v\":\n data = numpy.array(data).T.tolist()\n\n data_str = \"\"\n for row in data:\n data_str += \",\".join([\"%f\" % v for v in row]) + \"\\n\"\n return data_str\n\n else:\n # There might not be a single solution which fits all cases,\n # so this is left to specific implementations to override\n raise NotImplementedError()\n\n def _perform_wx(self):\n import wx\n\n indices, values = self._get_data_from_plots()\n data_str = self._serialize_data(indices, values)\n data_obj = wx.TextDataObject(data_str)\n\n if wx.TheClipboard.Open():\n wx.TheClipboard.SetData(data_obj)\n wx.TheClipboard.Close()\n else:\n wx.MessageBox(\"Unable to open the clipboard.\", \"Error\")\n\n def _perform_qt(self):\n from pyface.qt import QtGui\n\n indices, values = self._get_data_from_plots()\n data_str = self._serialize_data(indices, values)\n\n QtGui.QApplication.clipboard().setText(data_str)\n", "path": "chaco/tools/toolbars/toolbar_buttons.py"}]}
num_tokens_prompt: 3,257
num_tokens_diff: 126

problem_id: gh_patches_debug_5316
source: rasdani/github-patches
task_type: git_diff
in_source_id: Theano__Theano-6225
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `.../c_code/dimshuffle.c` missing from setup.py? A Lasagne user just reported that ```bash pip install --upgrade --no-deps https://github.com/Theano/Theano/archive/master.zip ``` lead to the following error (I assume after installation, when trying to import it): ``` FileNotFoundError: [Errno 2] No such file or directory: '/Users/XXXX/anaconda/lib/python3.5/site-packages/theano/tensor/c_code/dimshuffle.c ``` The file was added in #6174 and is included in the `master.zip` snapshot, but maybe it's missing in `setup.py` and thus not copied over? You wouldn't notice when doing a development/editable install. The user solved it by downloading `tensor/c_code/dimshuffle.c` and `gpuarray/c_code/dimshuffle.c` manually from github. Disclaimer: I haven't verified this behavior myself. Add "c_code" folders to Theano installations To do that, I converted current `c_code` folders to python modules (I don't find other solution, currently). This should fix #6222 . I had forgotten that @abergeron warned about it in #6047. @nouiz @lamblin </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 # 3 # TODO: 4 # * Figure out how to compile and install documentation automatically 5 # * Add download_url 6 7 from __future__ import absolute_import, print_function, division 8 import os 9 import subprocess 10 import codecs 11 from fnmatch import fnmatchcase 12 from distutils.util import convert_path 13 try: 14 from setuptools import setup 15 except ImportError: 16 from distutils.core import setup 17 18 19 CLASSIFIERS = """\ 20 Development Status :: 4 - Beta 21 Intended Audience :: Education 22 Intended Audience :: Science/Research 23 Intended Audience :: Developers 24 License :: OSI Approved :: BSD License 25 Programming Language :: Python 26 Topic :: Software Development :: Code Generators 27 Topic :: Software Development :: Compilers 28 Topic :: Scientific/Engineering :: Mathematics 29 Operating System :: Microsoft :: Windows 30 Operating System :: POSIX 31 Operating System :: Unix 32 Operating System :: MacOS 33 Programming Language :: Python :: 2 34 Programming Language :: Python :: 2.7 35 Programming Language :: Python :: 3 36 Programming Language :: Python :: 3.4 37 Programming Language :: Python :: 3.5 38 """ 39 NAME = 'Theano' 40 MAINTAINER = "LISA laboratory, University of Montreal" 41 MAINTAINER_EMAIL = "theano-dev@googlegroups.com" 42 DESCRIPTION = ('Optimizing compiler for evaluating mathematical ' + 43 'expressions on CPUs and GPUs.') 44 LONG_DESCRIPTION = (codecs.open("DESCRIPTION.txt", encoding='utf-8').read() + 45 "\n\n" + codecs.open("NEWS.txt", encoding='utf-8').read()) 46 URL = "http://deeplearning.net/software/theano/" 47 DOWNLOAD_URL = "" 48 LICENSE = 'BSD' 49 CLASSIFIERS = [_f for _f in CLASSIFIERS.split('\n') if _f] 50 AUTHOR = "LISA laboratory, University of Montreal" 51 AUTHOR_EMAIL = "theano-dev@googlegroups.com" 52 PLATFORMS = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"] 53 MAJOR = 0 54 MINOR = 10 55 MICRO = 0 56 SUFFIX = "dev1" # Should be blank except for rc's, betas, etc. 57 ISRELEASED = False 58 59 VERSION = '%d.%d.%d%s' % (MAJOR, MINOR, MICRO, SUFFIX) 60 61 62 def find_packages(where='.', exclude=()): 63 out = [] 64 stack = [(convert_path(where), '')] 65 while stack: 66 where, prefix = stack.pop(0) 67 for name in os.listdir(where): 68 fn = os.path.join(where, name) 69 if ('.' 
not in name and os.path.isdir(fn) and 70 os.path.isfile(os.path.join(fn, '__init__.py')) 71 ): 72 out.append(prefix+name) 73 stack.append((fn, prefix+name+'.')) 74 for pat in list(exclude) + ['ez_setup', 'distribute_setup']: 75 out = [item for item in out if not fnmatchcase(item, pat)] 76 return out 77 78 79 def git_version(): 80 """ 81 Return the sha1 of local git HEAD as a string. 82 """ 83 # josharian: I doubt that the minimal environment stuff here is 84 # still needed; it is inherited. This was originally 85 # an hg_version function borrowed from NumPy's setup.py. 86 # I'm leaving it in for now because I don't have enough other 87 # environments to test in to be confident that it is safe to remove. 88 def _minimal_ext_cmd(cmd): 89 # construct minimal environment 90 env = {} 91 for k in ['SYSTEMROOT', 'PATH', 'PYTHONPATH']: 92 v = os.environ.get(k) 93 if v is not None: 94 env[k] = v 95 # LANGUAGE is used on win32 96 env['LANGUAGE'] = 'C' 97 env['LANG'] = 'C' 98 env['LC_ALL'] = 'C' 99 out = subprocess.Popen( 100 cmd, 101 stdout=subprocess.PIPE, 102 env=env 103 ).communicate()[0] 104 return out 105 try: 106 out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) 107 git_revision = out.strip().decode('ascii') 108 except OSError: 109 git_revision = "unknown-git" 110 return git_revision 111 112 113 def write_text(filename, text): 114 try: 115 with open(filename, 'w') as a: 116 a.write(text) 117 except Exception as e: 118 print(e) 119 120 121 def write_version_py(filename=os.path.join('theano', 'generated_version.py')): 122 cnt = """ 123 # THIS FILE IS GENERATED FROM THEANO SETUP.PY 124 short_version = '%(version)s' 125 version = '%(version)s' 126 git_revision = '%(git_revision)s' 127 full_version = '%(version)s.dev-%%(git_revision)s' %% { 128 'git_revision': git_revision} 129 release = %(isrelease)s 130 131 if not release: 132 version = full_version 133 """ 134 FULL_VERSION = VERSION 135 if os.path.isdir('.git'): 136 GIT_REVISION = git_version() 137 elif os.path.exists(filename): 138 # must be a source distribution, use existing version file 139 GIT_REVISION = "RELEASE" 140 else: 141 GIT_REVISION = "unknown-git" 142 143 FULL_VERSION += '.dev-' + GIT_REVISION 144 text = cnt % {'version': VERSION, 145 'full_version': FULL_VERSION, 146 'git_revision': GIT_REVISION, 147 'isrelease': str(ISRELEASED)} 148 write_text(filename, text) 149 150 151 def do_setup(): 152 write_version_py() 153 setup(name=NAME, 154 version=VERSION, 155 description=DESCRIPTION, 156 long_description=LONG_DESCRIPTION, 157 classifiers=CLASSIFIERS, 158 author=AUTHOR, 159 author_email=AUTHOR_EMAIL, 160 url=URL, 161 license=LICENSE, 162 platforms=PLATFORMS, 163 packages=find_packages(), 164 install_requires=['numpy>=1.9.1', 'scipy>=0.14', 'six>=1.9.0'], 165 # pygments is a dependency for Sphinx code highlight 166 extras_require={ 167 'test': ['nose>=1.3.0', 'parameterized', 'flake8<3'], 168 'doc': ['Sphinx>=0.5.1', 'pygments'] 169 }, 170 package_data={ 171 '': ['*.txt', '*.rst', '*.cu', '*.cuh', '*.c', '*.sh', '*.pkl', 172 '*.h', '*.cpp', 'ChangeLog'], 173 'theano.misc': ['*.sh'], 174 'theano.d3viz' : ['html/*','css/*','js/*'] 175 }, 176 entry_points={ 177 'console_scripts': ['theano-cache = bin.theano_cache:main', 178 'theano-nose = bin.theano_nose:main'] 179 }, 180 keywords=' '.join([ 181 'theano', 'math', 'numerical', 'symbolic', 'blas', 182 'numpy', 'gpu', 'autodiff', 'differentiation' 183 ]), 184 ) 185 if __name__ == "__main__": 186 do_setup() 187 [end of setup.py] </code> I need you to solve this issue by generating a single 
patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -169,7 +169,7 @@ }, package_data={ '': ['*.txt', '*.rst', '*.cu', '*.cuh', '*.c', '*.sh', '*.pkl', - '*.h', '*.cpp', 'ChangeLog'], + '*.h', '*.cpp', 'ChangeLog', 'c_code/*'], 'theano.misc': ['*.sh'], 'theano.d3viz' : ['html/*','css/*','js/*'] },
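One detail worth spelling out about this fix: `package_data` patterns are resolved relative to each package directory, the `''` key applies them to every package found by `find_packages()`, and patterns may point into subdirectories, so `'c_code/*'` should ship files such as `theano/tensor/c_code/dimshuffle.c` without those folders themselves having to be Python packages. A stripped-down sketch of the same idiom, with a hypothetical project name and layout:

```python
from setuptools import setup, find_packages

setup(
    name="example-project",
    version="0.1",
    packages=find_packages(),
    package_data={
        # For every package, also install matching non-Python files,
        # including those sitting in a c_code/ subdirectory.
        "": ["*.txt", "*.c", "*.h", "c_code/*"],
    },
)
```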
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -169,7 +169,7 @@\n },\n package_data={\n '': ['*.txt', '*.rst', '*.cu', '*.cuh', '*.c', '*.sh', '*.pkl',\n- '*.h', '*.cpp', 'ChangeLog'],\n+ '*.h', '*.cpp', 'ChangeLog', 'c_code/*'],\n 'theano.misc': ['*.sh'],\n 'theano.d3viz' : ['html/*','css/*','js/*']\n },\n", "issue": "`.../c_code/dimshuffle.c` missing from setup.py?\nA Lasagne user just reported that\r\n```bash\r\npip install --upgrade --no-deps https://github.com/Theano/Theano/archive/master.zip\r\n```\r\nlead to the following error (I assume after installation, when trying to import it):\r\n```\r\nFileNotFoundError: [Errno 2] No such file or directory: '/Users/XXXX/anaconda/lib/python3.5/site-packages/theano/tensor/c_code/dimshuffle.c\r\n```\r\nThe file was added in #6174 and is included in the `master.zip` snapshot, but maybe it's missing in `setup.py` and thus not copied over? You wouldn't notice when doing a development/editable install. The user solved it by downloading `tensor/c_code/dimshuffle.c` and `gpuarray/c_code/dimshuffle.c` manually from github.\r\n\r\nDisclaimer: I haven't verified this behavior myself.\nAdd \"c_code\" folders to Theano installations\nTo do that, I converted current `c_code` folders to python modules (I don't find other solution, currently). This should fix #6222 . I had forgotten that @abergeron warned about it in #6047.\r\n\r\n@nouiz @lamblin \n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# TODO:\n# * Figure out how to compile and install documentation automatically\n# * Add download_url\n\nfrom __future__ import absolute_import, print_function, division\nimport os\nimport subprocess\nimport codecs\nfrom fnmatch import fnmatchcase\nfrom distutils.util import convert_path\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 4 - Beta\nIntended Audience :: Education\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: BSD License\nProgramming Language :: Python\nTopic :: Software Development :: Code Generators\nTopic :: Software Development :: Compilers\nTopic :: Scientific/Engineering :: Mathematics\nOperating System :: Microsoft :: Windows\nOperating System :: POSIX\nOperating System :: Unix\nOperating System :: MacOS\nProgramming Language :: Python :: 2\nProgramming Language :: Python :: 2.7\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.4\nProgramming Language :: Python :: 3.5\n\"\"\"\nNAME = 'Theano'\nMAINTAINER = \"LISA laboratory, University of Montreal\"\nMAINTAINER_EMAIL = \"theano-dev@googlegroups.com\"\nDESCRIPTION = ('Optimizing compiler for evaluating mathematical ' +\n 'expressions on CPUs and GPUs.')\nLONG_DESCRIPTION = (codecs.open(\"DESCRIPTION.txt\", encoding='utf-8').read() +\n \"\\n\\n\" + codecs.open(\"NEWS.txt\", encoding='utf-8').read())\nURL = \"http://deeplearning.net/software/theano/\"\nDOWNLOAD_URL = \"\"\nLICENSE = 'BSD'\nCLASSIFIERS = [_f for _f in CLASSIFIERS.split('\\n') if _f]\nAUTHOR = \"LISA laboratory, University of Montreal\"\nAUTHOR_EMAIL = \"theano-dev@googlegroups.com\"\nPLATFORMS = [\"Windows\", \"Linux\", \"Solaris\", \"Mac OS-X\", \"Unix\"]\nMAJOR = 0\nMINOR = 10\nMICRO = 0\nSUFFIX = \"dev1\" # Should be blank except for rc's, betas, etc.\nISRELEASED = False\n\nVERSION = '%d.%d.%d%s' % (MAJOR, MINOR, MICRO, SUFFIX)\n\n\ndef find_packages(where='.', exclude=()):\n out = []\n stack = 
[(convert_path(where), '')]\n while stack:\n where, prefix = stack.pop(0)\n for name in os.listdir(where):\n fn = os.path.join(where, name)\n if ('.' not in name and os.path.isdir(fn) and\n os.path.isfile(os.path.join(fn, '__init__.py'))\n ):\n out.append(prefix+name)\n stack.append((fn, prefix+name+'.'))\n for pat in list(exclude) + ['ez_setup', 'distribute_setup']:\n out = [item for item in out if not fnmatchcase(item, pat)]\n return out\n\n\ndef git_version():\n \"\"\"\n Return the sha1 of local git HEAD as a string.\n \"\"\"\n # josharian: I doubt that the minimal environment stuff here is\n # still needed; it is inherited. This was originally\n # an hg_version function borrowed from NumPy's setup.py.\n # I'm leaving it in for now because I don't have enough other\n # environments to test in to be confident that it is safe to remove.\n def _minimal_ext_cmd(cmd):\n # construct minimal environment\n env = {}\n for k in ['SYSTEMROOT', 'PATH', 'PYTHONPATH']:\n v = os.environ.get(k)\n if v is not None:\n env[k] = v\n # LANGUAGE is used on win32\n env['LANGUAGE'] = 'C'\n env['LANG'] = 'C'\n env['LC_ALL'] = 'C'\n out = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n env=env\n ).communicate()[0]\n return out\n try:\n out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])\n git_revision = out.strip().decode('ascii')\n except OSError:\n git_revision = \"unknown-git\"\n return git_revision\n\n\ndef write_text(filename, text):\n try:\n with open(filename, 'w') as a:\n a.write(text)\n except Exception as e:\n print(e)\n\n\ndef write_version_py(filename=os.path.join('theano', 'generated_version.py')):\n cnt = \"\"\"\n# THIS FILE IS GENERATED FROM THEANO SETUP.PY\nshort_version = '%(version)s'\nversion = '%(version)s'\ngit_revision = '%(git_revision)s'\nfull_version = '%(version)s.dev-%%(git_revision)s' %% {\n 'git_revision': git_revision}\nrelease = %(isrelease)s\n\nif not release:\n version = full_version\n\"\"\"\n FULL_VERSION = VERSION\n if os.path.isdir('.git'):\n GIT_REVISION = git_version()\n elif os.path.exists(filename):\n # must be a source distribution, use existing version file\n GIT_REVISION = \"RELEASE\"\n else:\n GIT_REVISION = \"unknown-git\"\n\n FULL_VERSION += '.dev-' + GIT_REVISION\n text = cnt % {'version': VERSION,\n 'full_version': FULL_VERSION,\n 'git_revision': GIT_REVISION,\n 'isrelease': str(ISRELEASED)}\n write_text(filename, text)\n\n\ndef do_setup():\n write_version_py()\n setup(name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n classifiers=CLASSIFIERS,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n url=URL,\n license=LICENSE,\n platforms=PLATFORMS,\n packages=find_packages(),\n install_requires=['numpy>=1.9.1', 'scipy>=0.14', 'six>=1.9.0'],\n # pygments is a dependency for Sphinx code highlight\n extras_require={\n 'test': ['nose>=1.3.0', 'parameterized', 'flake8<3'],\n 'doc': ['Sphinx>=0.5.1', 'pygments']\n },\n package_data={\n '': ['*.txt', '*.rst', '*.cu', '*.cuh', '*.c', '*.sh', '*.pkl',\n '*.h', '*.cpp', 'ChangeLog'],\n 'theano.misc': ['*.sh'],\n 'theano.d3viz' : ['html/*','css/*','js/*']\n },\n entry_points={\n 'console_scripts': ['theano-cache = bin.theano_cache:main',\n 'theano-nose = bin.theano_nose:main']\n },\n keywords=' '.join([\n 'theano', 'math', 'numerical', 'symbolic', 'blas',\n 'numpy', 'gpu', 'autodiff', 'differentiation'\n ]),\n )\nif __name__ == \"__main__\":\n do_setup()\n", "path": "setup.py"}]}
num_tokens_prompt: 2,766
num_tokens_diff: 127

problem_id: gh_patches_debug_3653
source: rasdani/github-patches
task_type: git_diff
in_source_id: pypa__pip-9569
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Set encoding for tar file and use unicode path for unpacking When tarfile.TarFile decodes filenames in Python 2.7 by default it uses sys.getfilesystemencoding. On Windows this returns "mbcs", which is lossy when converting from proper utf-8 to bytes (results in '?' for out of range characters). We now pass an encoding to tarfile.open which will be used instead. Since the encoding argument is only ever used for the PAX format, and since the PAX format guarantees utf-8 encoded information, this should work in all circumstances. For filesystem APIs in Python 2, the type of the path object passed dictates the underlying Windows API that is called. For `str` it is the `*A` (for ANSI) APIs. For `unicode` it is the `*W` (for Wide character) APIs. To use the second set of APIs, which properly handles unicode filenames, we convert the byte path to utf-8. Fixes #7667. Filename encoding error in some environments with PAX sdist **Environment** * pip version: any * Python version: 2.7 * OS: Windows, non-Windows in C locale (pip Windows CI hits this) **Description** The PAX format wheel 0.34.1 sdists fail to install on Python 2.7 on Windows with a UnicodeEncodeError, or on non-Windows systems in a non-utf-8 locale: https://github.com/pypa/wheel/issues/331 **Expected behavior** Unicode filename from the PAX tarball is correctly encoded for the local filesystem. **How to Reproduce** Attempt to install a PAX formatted tarball containing a file name that cannot be encoded to the default code page (Windows) or the default locale encoding (non-Windows). In GNU tar, the affected paths are pre-mangled to something ASCII compatible, but PAX tar preserves them correctly, so the installer needs to handle them itself. **Output** See https://dev.azure.com/pypa/pip/_build/results?buildId=18040&view=logs&j=404e6841-f5ba-57d9-f2c8-8c5322057572&t=0219f6bf-240d-5b08-c877-377b12af5079&l=309 for a Windows example in the pip test suite. The wheel issue linked above has some Linux examples. </issue> <code> [start of src/pip/_internal/utils/unpacking.py] 1 """Utilities related archives. 
2 """ 3 4 import logging 5 import os 6 import shutil 7 import stat 8 import tarfile 9 import zipfile 10 from typing import Iterable, List, Optional 11 from zipfile import ZipInfo 12 13 from pip._internal.exceptions import InstallationError 14 from pip._internal.utils.filetypes import ( 15 BZ2_EXTENSIONS, 16 TAR_EXTENSIONS, 17 XZ_EXTENSIONS, 18 ZIP_EXTENSIONS, 19 ) 20 from pip._internal.utils.misc import ensure_dir 21 22 logger = logging.getLogger(__name__) 23 24 25 SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS 26 27 try: 28 import bz2 # noqa 29 30 SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS 31 except ImportError: 32 logger.debug("bz2 module is not available") 33 34 try: 35 # Only for Python 3.3+ 36 import lzma # noqa 37 38 SUPPORTED_EXTENSIONS += XZ_EXTENSIONS 39 except ImportError: 40 logger.debug("lzma module is not available") 41 42 43 def current_umask(): 44 # type: () -> int 45 """Get the current umask which involves having to set it temporarily.""" 46 mask = os.umask(0) 47 os.umask(mask) 48 return mask 49 50 51 def split_leading_dir(path): 52 # type: (str) -> List[str] 53 path = path.lstrip("/").lstrip("\\") 54 if "/" in path and ( 55 ("\\" in path and path.find("/") < path.find("\\")) or "\\" not in path 56 ): 57 return path.split("/", 1) 58 elif "\\" in path: 59 return path.split("\\", 1) 60 else: 61 return [path, ""] 62 63 64 def has_leading_dir(paths): 65 # type: (Iterable[str]) -> bool 66 """Returns true if all the paths have the same leading path name 67 (i.e., everything is in one subdirectory in an archive)""" 68 common_prefix = None 69 for path in paths: 70 prefix, rest = split_leading_dir(path) 71 if not prefix: 72 return False 73 elif common_prefix is None: 74 common_prefix = prefix 75 elif prefix != common_prefix: 76 return False 77 return True 78 79 80 def is_within_directory(directory, target): 81 # type: (str, str) -> bool 82 """ 83 Return true if the absolute path of target is within the directory 84 """ 85 abs_directory = os.path.abspath(directory) 86 abs_target = os.path.abspath(target) 87 88 prefix = os.path.commonprefix([abs_directory, abs_target]) 89 return prefix == abs_directory 90 91 92 def set_extracted_file_to_default_mode_plus_executable(path): 93 # type: (str) -> None 94 """ 95 Make file present at path have execute for user/group/world 96 (chmod +x) is no-op on windows per python docs 97 """ 98 os.chmod(path, (0o777 & ~current_umask() | 0o111)) 99 100 101 def zip_item_is_executable(info): 102 # type: (ZipInfo) -> bool 103 mode = info.external_attr >> 16 104 # if mode and regular file and any execute permissions for 105 # user/group/world? 106 return bool(mode and stat.S_ISREG(mode) and mode & 0o111) 107 108 109 def unzip_file(filename, location, flatten=True): 110 # type: (str, str, bool) -> None 111 """ 112 Unzip the file (with path `filename`) to the destination `location`. All 113 files are written based on system defaults and umask (i.e. permissions are 114 not preserved), except that regular file members with any execute 115 permissions (user, group, or world) have "chmod +x" applied after being 116 written. Note that for windows, any execute changes using os.chmod are 117 no-ops per the python docs. 
118 """ 119 ensure_dir(location) 120 zipfp = open(filename, "rb") 121 try: 122 zip = zipfile.ZipFile(zipfp, allowZip64=True) 123 leading = has_leading_dir(zip.namelist()) and flatten 124 for info in zip.infolist(): 125 name = info.filename 126 fn = name 127 if leading: 128 fn = split_leading_dir(name)[1] 129 fn = os.path.join(location, fn) 130 dir = os.path.dirname(fn) 131 if not is_within_directory(location, fn): 132 message = ( 133 "The zip file ({}) has a file ({}) trying to install " 134 "outside target directory ({})" 135 ) 136 raise InstallationError(message.format(filename, fn, location)) 137 if fn.endswith("/") or fn.endswith("\\"): 138 # A directory 139 ensure_dir(fn) 140 else: 141 ensure_dir(dir) 142 # Don't use read() to avoid allocating an arbitrarily large 143 # chunk of memory for the file's content 144 fp = zip.open(name) 145 try: 146 with open(fn, "wb") as destfp: 147 shutil.copyfileobj(fp, destfp) 148 finally: 149 fp.close() 150 if zip_item_is_executable(info): 151 set_extracted_file_to_default_mode_plus_executable(fn) 152 finally: 153 zipfp.close() 154 155 156 def untar_file(filename, location): 157 # type: (str, str) -> None 158 """ 159 Untar the file (with path `filename`) to the destination `location`. 160 All files are written based on system defaults and umask (i.e. permissions 161 are not preserved), except that regular file members with any execute 162 permissions (user, group, or world) have "chmod +x" applied after being 163 written. Note that for windows, any execute changes using os.chmod are 164 no-ops per the python docs. 165 """ 166 ensure_dir(location) 167 if filename.lower().endswith(".gz") or filename.lower().endswith(".tgz"): 168 mode = "r:gz" 169 elif filename.lower().endswith(BZ2_EXTENSIONS): 170 mode = "r:bz2" 171 elif filename.lower().endswith(XZ_EXTENSIONS): 172 mode = "r:xz" 173 elif filename.lower().endswith(".tar"): 174 mode = "r" 175 else: 176 logger.warning( 177 "Cannot determine compression type for file %s", 178 filename, 179 ) 180 mode = "r:*" 181 tar = tarfile.open(filename, mode) 182 try: 183 leading = has_leading_dir([member.name for member in tar.getmembers()]) 184 for member in tar.getmembers(): 185 fn = member.name 186 if leading: 187 fn = split_leading_dir(fn)[1] 188 path = os.path.join(location, fn) 189 if not is_within_directory(location, path): 190 message = ( 191 "The tar file ({}) has a file ({}) trying to install " 192 "outside target directory ({})" 193 ) 194 raise InstallationError(message.format(filename, path, location)) 195 if member.isdir(): 196 ensure_dir(path) 197 elif member.issym(): 198 try: 199 # https://github.com/python/typeshed/issues/2673 200 tar._extract_member(member, path) # type: ignore 201 except Exception as exc: 202 # Some corrupt tar files seem to produce this 203 # (specifically bad symlinks) 204 logger.warning( 205 "In the tar file %s the member %s is invalid: %s", 206 filename, 207 member.name, 208 exc, 209 ) 210 continue 211 else: 212 try: 213 fp = tar.extractfile(member) 214 except (KeyError, AttributeError) as exc: 215 # Some corrupt tar files seem to produce this 216 # (specifically bad symlinks) 217 logger.warning( 218 "In the tar file %s the member %s is invalid: %s", 219 filename, 220 member.name, 221 exc, 222 ) 223 continue 224 ensure_dir(os.path.dirname(path)) 225 assert fp is not None 226 with open(path, "wb") as destfp: 227 shutil.copyfileobj(fp, destfp) 228 fp.close() 229 # Update the timestamp (useful for cython compiled files) 230 tar.utime(member, path) 231 # member have any execute 
permissions for user/group/world? 232 if member.mode & 0o111: 233 set_extracted_file_to_default_mode_plus_executable(path) 234 finally: 235 tar.close() 236 237 238 def unpack_file( 239 filename, # type: str 240 location, # type: str 241 content_type=None, # type: Optional[str] 242 ): 243 # type: (...) -> None 244 filename = os.path.realpath(filename) 245 if ( 246 content_type == "application/zip" 247 or filename.lower().endswith(ZIP_EXTENSIONS) 248 or zipfile.is_zipfile(filename) 249 ): 250 unzip_file(filename, location, flatten=not filename.endswith(".whl")) 251 elif ( 252 content_type == "application/x-gzip" 253 or tarfile.is_tarfile(filename) 254 or filename.lower().endswith(TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS) 255 ): 256 untar_file(filename, location) 257 else: 258 # FIXME: handle? 259 # FIXME: magic signatures? 260 logger.critical( 261 "Cannot unpack file %s (downloaded from %s, content-type: %s); " 262 "cannot detect archive format", 263 filename, 264 location, 265 content_type, 266 ) 267 raise InstallationError(f"Cannot determine archive format of {location}") 268 [end of src/pip/_internal/utils/unpacking.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/pip/_internal/utils/unpacking.py b/src/pip/_internal/utils/unpacking.py --- a/src/pip/_internal/utils/unpacking.py +++ b/src/pip/_internal/utils/unpacking.py @@ -178,7 +178,7 @@ filename, ) mode = "r:*" - tar = tarfile.open(filename, mode) + tar = tarfile.open(filename, mode, encoding="utf-8") try: leading = has_leading_dir([member.name for member in tar.getmembers()]) for member in tar.getmembers():
{"golden_diff": "diff --git a/src/pip/_internal/utils/unpacking.py b/src/pip/_internal/utils/unpacking.py\n--- a/src/pip/_internal/utils/unpacking.py\n+++ b/src/pip/_internal/utils/unpacking.py\n@@ -178,7 +178,7 @@\n filename,\n )\n mode = \"r:*\"\n- tar = tarfile.open(filename, mode)\n+ tar = tarfile.open(filename, mode, encoding=\"utf-8\")\n try:\n leading = has_leading_dir([member.name for member in tar.getmembers()])\n for member in tar.getmembers():\n", "issue": "Set encoding for tar file and use unicode path for unpacking\nWhen tarfile.TarFile decodes filenames in Python 2.7 by default it uses\r\nsys.getfilesystemencoding. On Windows this returns \"mbcs\", which is\r\nlossy when converting from proper utf-8 to bytes (results in '?' for out\r\nof range characters).\r\n\r\nWe now pass an encoding to tarfile.open which will be used instead.\r\nSince the encoding argument is only ever used for the PAX format, and\r\nsince the PAX format guarantees utf-8 encoded information, this should\r\nwork in all circumstances.\r\n\r\nFor filesystem APIs in Python 2, the type of the path object passed\r\ndictates the underlying Windows API that is called. For `str` it is the\r\n`*A` (for ANSI) APIs. For `unicode` it is the `*W` (for Wide character)\r\nAPIs. To use the second set of APIs, which properly handles unicode\r\nfilenames, we convert the byte path to utf-8.\r\n\r\nFixes #7667.\nFilename encoding error in some environments with PAX sdist\n**Environment**\r\n\r\n* pip version: any\r\n* Python version: 2.7\r\n* OS: Windows, non-Windows in C locale\r\n\r\n(pip Windows CI hits this)\r\n\r\n**Description**\r\nThe PAX format wheel 0.34.1 sdists fail to install on Python 2.7 on Windows with a UnicodeEncodeError, or on non-Windows systems in a non-utf-8 locale: https://github.com/pypa/wheel/issues/331\r\n\r\n**Expected behavior**\r\nUnicode filename from the PAX tarball is correctly encoded for the local filesystem.\r\n\r\n**How to Reproduce**\r\nAttempt to install a PAX formatted tarball containing a file name that cannot be encoded to the default code page (Windows) or the default locale encoding (non-Windows).\r\n\r\nIn GNU tar, the affected paths are pre-mangled to something ASCII compatible, but PAX tar preserves them correctly, so the installer needs to handle them itself.\r\n\r\n**Output**\r\n\r\nSee \r\nhttps://dev.azure.com/pypa/pip/_build/results?buildId=18040&view=logs&j=404e6841-f5ba-57d9-f2c8-8c5322057572&t=0219f6bf-240d-5b08-c877-377b12af5079&l=309 for a Windows example in the pip test suite.\r\n\r\nThe wheel issue linked above has some Linux examples.\n", "before_files": [{"content": "\"\"\"Utilities related archives.\n\"\"\"\n\nimport logging\nimport os\nimport shutil\nimport stat\nimport tarfile\nimport zipfile\nfrom typing import Iterable, List, Optional\nfrom zipfile import ZipInfo\n\nfrom pip._internal.exceptions import InstallationError\nfrom pip._internal.utils.filetypes import (\n BZ2_EXTENSIONS,\n TAR_EXTENSIONS,\n XZ_EXTENSIONS,\n ZIP_EXTENSIONS,\n)\nfrom pip._internal.utils.misc import ensure_dir\n\nlogger = logging.getLogger(__name__)\n\n\nSUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS\n\ntry:\n import bz2 # noqa\n\n SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS\nexcept ImportError:\n logger.debug(\"bz2 module is not available\")\n\ntry:\n # Only for Python 3.3+\n import lzma # noqa\n\n SUPPORTED_EXTENSIONS += XZ_EXTENSIONS\nexcept ImportError:\n logger.debug(\"lzma module is not available\")\n\n\ndef current_umask():\n # type: () -> int\n \"\"\"Get the current umask which 
involves having to set it temporarily.\"\"\"\n mask = os.umask(0)\n os.umask(mask)\n return mask\n\n\ndef split_leading_dir(path):\n # type: (str) -> List[str]\n path = path.lstrip(\"/\").lstrip(\"\\\\\")\n if \"/\" in path and (\n (\"\\\\\" in path and path.find(\"/\") < path.find(\"\\\\\")) or \"\\\\\" not in path\n ):\n return path.split(\"/\", 1)\n elif \"\\\\\" in path:\n return path.split(\"\\\\\", 1)\n else:\n return [path, \"\"]\n\n\ndef has_leading_dir(paths):\n # type: (Iterable[str]) -> bool\n \"\"\"Returns true if all the paths have the same leading path name\n (i.e., everything is in one subdirectory in an archive)\"\"\"\n common_prefix = None\n for path in paths:\n prefix, rest = split_leading_dir(path)\n if not prefix:\n return False\n elif common_prefix is None:\n common_prefix = prefix\n elif prefix != common_prefix:\n return False\n return True\n\n\ndef is_within_directory(directory, target):\n # type: (str, str) -> bool\n \"\"\"\n Return true if the absolute path of target is within the directory\n \"\"\"\n abs_directory = os.path.abspath(directory)\n abs_target = os.path.abspath(target)\n\n prefix = os.path.commonprefix([abs_directory, abs_target])\n return prefix == abs_directory\n\n\ndef set_extracted_file_to_default_mode_plus_executable(path):\n # type: (str) -> None\n \"\"\"\n Make file present at path have execute for user/group/world\n (chmod +x) is no-op on windows per python docs\n \"\"\"\n os.chmod(path, (0o777 & ~current_umask() | 0o111))\n\n\ndef zip_item_is_executable(info):\n # type: (ZipInfo) -> bool\n mode = info.external_attr >> 16\n # if mode and regular file and any execute permissions for\n # user/group/world?\n return bool(mode and stat.S_ISREG(mode) and mode & 0o111)\n\n\ndef unzip_file(filename, location, flatten=True):\n # type: (str, str, bool) -> None\n \"\"\"\n Unzip the file (with path `filename`) to the destination `location`. All\n files are written based on system defaults and umask (i.e. permissions are\n not preserved), except that regular file members with any execute\n permissions (user, group, or world) have \"chmod +x\" applied after being\n written. Note that for windows, any execute changes using os.chmod are\n no-ops per the python docs.\n \"\"\"\n ensure_dir(location)\n zipfp = open(filename, \"rb\")\n try:\n zip = zipfile.ZipFile(zipfp, allowZip64=True)\n leading = has_leading_dir(zip.namelist()) and flatten\n for info in zip.infolist():\n name = info.filename\n fn = name\n if leading:\n fn = split_leading_dir(name)[1]\n fn = os.path.join(location, fn)\n dir = os.path.dirname(fn)\n if not is_within_directory(location, fn):\n message = (\n \"The zip file ({}) has a file ({}) trying to install \"\n \"outside target directory ({})\"\n )\n raise InstallationError(message.format(filename, fn, location))\n if fn.endswith(\"/\") or fn.endswith(\"\\\\\"):\n # A directory\n ensure_dir(fn)\n else:\n ensure_dir(dir)\n # Don't use read() to avoid allocating an arbitrarily large\n # chunk of memory for the file's content\n fp = zip.open(name)\n try:\n with open(fn, \"wb\") as destfp:\n shutil.copyfileobj(fp, destfp)\n finally:\n fp.close()\n if zip_item_is_executable(info):\n set_extracted_file_to_default_mode_plus_executable(fn)\n finally:\n zipfp.close()\n\n\ndef untar_file(filename, location):\n # type: (str, str) -> None\n \"\"\"\n Untar the file (with path `filename`) to the destination `location`.\n All files are written based on system defaults and umask (i.e. 
permissions\n are not preserved), except that regular file members with any execute\n permissions (user, group, or world) have \"chmod +x\" applied after being\n written. Note that for windows, any execute changes using os.chmod are\n no-ops per the python docs.\n \"\"\"\n ensure_dir(location)\n if filename.lower().endswith(\".gz\") or filename.lower().endswith(\".tgz\"):\n mode = \"r:gz\"\n elif filename.lower().endswith(BZ2_EXTENSIONS):\n mode = \"r:bz2\"\n elif filename.lower().endswith(XZ_EXTENSIONS):\n mode = \"r:xz\"\n elif filename.lower().endswith(\".tar\"):\n mode = \"r\"\n else:\n logger.warning(\n \"Cannot determine compression type for file %s\",\n filename,\n )\n mode = \"r:*\"\n tar = tarfile.open(filename, mode)\n try:\n leading = has_leading_dir([member.name for member in tar.getmembers()])\n for member in tar.getmembers():\n fn = member.name\n if leading:\n fn = split_leading_dir(fn)[1]\n path = os.path.join(location, fn)\n if not is_within_directory(location, path):\n message = (\n \"The tar file ({}) has a file ({}) trying to install \"\n \"outside target directory ({})\"\n )\n raise InstallationError(message.format(filename, path, location))\n if member.isdir():\n ensure_dir(path)\n elif member.issym():\n try:\n # https://github.com/python/typeshed/issues/2673\n tar._extract_member(member, path) # type: ignore\n except Exception as exc:\n # Some corrupt tar files seem to produce this\n # (specifically bad symlinks)\n logger.warning(\n \"In the tar file %s the member %s is invalid: %s\",\n filename,\n member.name,\n exc,\n )\n continue\n else:\n try:\n fp = tar.extractfile(member)\n except (KeyError, AttributeError) as exc:\n # Some corrupt tar files seem to produce this\n # (specifically bad symlinks)\n logger.warning(\n \"In the tar file %s the member %s is invalid: %s\",\n filename,\n member.name,\n exc,\n )\n continue\n ensure_dir(os.path.dirname(path))\n assert fp is not None\n with open(path, \"wb\") as destfp:\n shutil.copyfileobj(fp, destfp)\n fp.close()\n # Update the timestamp (useful for cython compiled files)\n tar.utime(member, path)\n # member have any execute permissions for user/group/world?\n if member.mode & 0o111:\n set_extracted_file_to_default_mode_plus_executable(path)\n finally:\n tar.close()\n\n\ndef unpack_file(\n filename, # type: str\n location, # type: str\n content_type=None, # type: Optional[str]\n):\n # type: (...) -> None\n filename = os.path.realpath(filename)\n if (\n content_type == \"application/zip\"\n or filename.lower().endswith(ZIP_EXTENSIONS)\n or zipfile.is_zipfile(filename)\n ):\n unzip_file(filename, location, flatten=not filename.endswith(\".whl\"))\n elif (\n content_type == \"application/x-gzip\"\n or tarfile.is_tarfile(filename)\n or filename.lower().endswith(TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)\n ):\n untar_file(filename, location)\n else:\n # FIXME: handle?\n # FIXME: magic signatures?\n logger.critical(\n \"Cannot unpack file %s (downloaded from %s, content-type: %s); \"\n \"cannot detect archive format\",\n filename,\n location,\n content_type,\n )\n raise InstallationError(f\"Cannot determine archive format of {location}\")\n", "path": "src/pip/_internal/utils/unpacking.py"}]}
3,773
130
gh_patches_debug_13
rasdani/github-patches
git_diff
OCHA-DAP__hdx-ckan-1779
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Ebola Page>Map: disable scroll wheel zoom CJ - The specific property is here: https://github.com/OCHA-DAP/hdx-design/blob/gh-pages/js/country.js line 111: map.scrollWheelZoom.disable(); </issue> <code> [start of ckanext-hdx_theme/ckanext/hdx_theme/version.py] 1 hdx_version = 'v0.5.1' 2 [end of ckanext-hdx_theme/ckanext/hdx_theme/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py --- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py +++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py @@ -1 +1 @@ -hdx_version = 'v0.5.1' +hdx_version = 'v0.5.2'
{"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n@@ -1 +1 @@\n-hdx_version = 'v0.5.1'\n+hdx_version = 'v0.5.2'\n", "issue": "Ebola Page>Map: disable scroll wheel zoom\nCJ - The specific property is here: https://github.com/OCHA-DAP/hdx-design/blob/gh-pages/js/country.js\n\nline 111: map.scrollWheelZoom.disable();\n\n", "before_files": [{"content": "hdx_version = 'v0.5.1'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}]}
621
106
gh_patches_debug_32295
rasdani/github-patches
git_diff
deepset-ai__haystack-6301
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Load additional fields from SQUAD-format file to meta field for labels **Is your feature request related to a problem? Please describe.** Currently `eval_data_from_json` loads additional fields for Documents as meta field but it doesn't do it for Labels. **Describe the solution you'd like** `eval_data_from_json` loads additional fields to `Label.meta` dict analogically to how it's done for Documents. **Describe alternatives you've considered** Currently I'd would have to rewrite whole loading data from json code to achieve this. </issue> <code> [start of haystack/document_stores/utils.py] 1 import typing 2 from typing import Dict, List, Optional, Tuple, Union, Generator 3 4 import json 5 import logging 6 from datetime import datetime 7 8 from haystack.schema import Document, Label, Answer, Span 9 from haystack.nodes.preprocessor import PreProcessor 10 11 if typing.TYPE_CHECKING: 12 # This results in a circular import if we don't use typing.TYPE_CHECKING 13 from haystack.document_stores.base import BaseDocumentStore 14 15 16 logger = logging.getLogger(__name__) 17 18 19 def eval_data_from_json( 20 filename: str, 21 max_docs: Optional[Union[int, bool]] = None, 22 preprocessor: Optional[PreProcessor] = None, 23 open_domain: bool = False, 24 ) -> Tuple[List[Document], List[Label]]: 25 """ 26 Read Documents + Labels from a SQuAD-style file. 27 Document and Labels can then be indexed to the DocumentStore and be used for evaluation. 28 29 :param filename: Path to file in SQuAD format 30 :param max_docs: This sets the number of documents that will be loaded. By default, this is set to None, thus reading in all available eval documents. 31 :param open_domain: Set this to True if your file is an open domain dataset where two different answers to the same question might be found in different contexts. 32 """ 33 docs: List[Document] = [] 34 labels = [] 35 problematic_ids = [] 36 37 with open(filename, "r", encoding="utf-8") as file: 38 data = json.load(file) 39 if "title" not in data["data"][0]: 40 logger.warning("No title information found for documents in QA file: %s", filename) 41 42 for squad_document in data["data"]: 43 if max_docs and len(docs) > max_docs: 44 break 45 # Extracting paragraphs and their labels from a SQuAD document dict 46 cur_docs, cur_labels, cur_problematic_ids = _extract_docs_and_labels_from_dict( 47 squad_document, preprocessor, open_domain 48 ) 49 docs.extend(cur_docs) 50 labels.extend(cur_labels) 51 problematic_ids.extend(cur_problematic_ids) 52 if len(problematic_ids) > 0: 53 logger.warning( 54 "Could not convert an answer for %s questions.\nThere were conversion errors for question ids: %s", 55 len(problematic_ids), 56 problematic_ids, 57 ) 58 return docs, labels 59 60 61 def eval_data_from_jsonl( 62 filename: str, 63 batch_size: Optional[int] = None, 64 max_docs: Optional[Union[int, bool]] = None, 65 preprocessor: Optional[PreProcessor] = None, 66 open_domain: bool = False, 67 ) -> Generator[Tuple[List[Document], List[Label]], None, None]: 68 """ 69 Read Documents + Labels from a SQuAD-style file in jsonl format, i.e. one document per line. 70 Document and Labels can then be indexed to the DocumentStore and be used for evaluation. 71 72 This is a generator which will yield one tuple per iteration containing a list 73 of batch_size documents and a list with the documents' labels. 74 If batch_size is set to None, this method will yield all documents and labels. 
75 76 :param filename: Path to file in SQuAD format 77 :param max_docs: This sets the number of documents that will be loaded. By default, this is set to None, thus reading in all available eval documents. 78 :param open_domain: Set this to True if your file is an open domain dataset where two different answers to the same question might be found in different contexts. 79 """ 80 docs: List[Document] = [] 81 labels = [] 82 problematic_ids = [] 83 84 with open(filename, "r", encoding="utf-8") as file: 85 for document in file: 86 if max_docs and len(docs) > max_docs: 87 break 88 # Extracting paragraphs and their labels from a SQuAD document dict 89 squad_document = json.loads(document) 90 cur_docs, cur_labels, cur_problematic_ids = _extract_docs_and_labels_from_dict( 91 squad_document, preprocessor, open_domain 92 ) 93 docs.extend(cur_docs) 94 labels.extend(cur_labels) 95 problematic_ids.extend(cur_problematic_ids) 96 97 if batch_size is not None and len(docs) >= batch_size: 98 if len(problematic_ids) > 0: 99 logger.warning( 100 "Could not convert an answer for %s questions.\n" 101 "There were conversion errors for question ids: %s", 102 len(problematic_ids), 103 problematic_ids, 104 ) 105 yield docs, labels 106 docs = [] 107 labels = [] 108 problematic_ids = [] 109 110 yield docs, labels 111 112 113 def squad_json_to_jsonl(squad_file: str, output_file: str): 114 """ 115 Converts a SQuAD-json-file into jsonl format with one document per line. 116 117 :param squad_file: SQuAD-file in json format. 118 :param output_file: Name of output file (SQuAD in jsonl format) 119 """ 120 with open(squad_file, encoding="utf-8") as json_file, open(output_file, "w", encoding="utf-8") as jsonl_file: 121 squad_json = json.load(json_file) 122 123 for doc in squad_json["data"]: 124 json.dump(doc, jsonl_file) 125 jsonl_file.write("\n") 126 127 128 def _extract_docs_and_labels_from_dict( 129 document_dict: Dict, preprocessor: Optional[PreProcessor] = None, open_domain: bool = False 130 ): 131 """ 132 Set open_domain to True if you are trying to load open_domain labels (i.e. labels without doc id or start idx) 133 """ 134 docs = [] 135 labels = [] 136 problematic_ids = [] 137 138 # get all extra fields from document level (e.g. 
title) 139 meta_doc = {k: v for k, v in document_dict.items() if k not in ("paragraphs", "title")} 140 for paragraph in document_dict["paragraphs"]: 141 ## Create Metadata 142 cur_meta = {"name": document_dict.get("title", None)} 143 # all other fields from paragraph level 144 meta_paragraph = {k: v for k, v in paragraph.items() if k not in ("qas", "context")} 145 cur_meta.update(meta_paragraph) 146 # meta from parent document 147 cur_meta.update(meta_doc) 148 149 ## Create Document 150 cur_full_doc = Document(content=paragraph["context"], meta=cur_meta) 151 if preprocessor is not None: 152 splits_docs = preprocessor.process(documents=[cur_full_doc]) 153 # we need to pull in _split_id into the document id for unique reference in labels 154 splits: List[Document] = [] 155 offset = 0 156 for d in splits_docs: 157 id = f"{d.id}-{d.meta['_split_id']}" 158 d.meta["_split_offset"] = offset 159 offset += len(d.content) 160 # offset correction based on splitting method 161 if preprocessor.split_by == "word": 162 offset += 1 163 elif preprocessor.split_by == "passage": 164 offset += 2 165 else: 166 raise NotImplementedError 167 mydoc = Document(content=d.content, id=id, meta=d.meta) 168 splits.append(mydoc) 169 else: 170 splits = [cur_full_doc] 171 docs.extend(splits) 172 173 ## Assign Labels to corresponding documents 174 for qa in paragraph["qas"]: 175 if not qa.get("is_impossible", False): 176 for answer in qa["answers"]: 177 ans = answer["text"] 178 # TODO The following block of code means that answer_start is never calculated 179 # and cur_id is always None for open_domain 180 # This can be rewritten so that this function could try to calculate offsets 181 # and populate id in open_domain mode 182 if open_domain: 183 # TODO check with Branden why we want to treat open_domain here differently. 184 # Shouldn't this be something configured at eval time only? 
185 cur_ans_start = answer.get("answer_start", 0) 186 # cur_id = '0' 187 label = Label( 188 query=qa["question"], 189 answer=Answer(answer=ans, type="extractive", score=0.0), 190 document=None, # type: ignore 191 is_correct_answer=True, 192 is_correct_document=True, 193 origin="gold-label", 194 ) 195 labels.append(label) 196 else: 197 ans_position = cur_full_doc.content[ 198 answer["answer_start"] : answer["answer_start"] + len(str(ans)) 199 ] 200 if ans != ans_position: 201 # do not use answer 202 problematic_ids.append(qa.get("id", "missing")) 203 break 204 # find corresponding document or split 205 if len(splits) == 1: 206 # cur_id = splits[0].id 207 cur_ans_start = answer["answer_start"] 208 cur_doc = splits[0] 209 else: 210 for s in splits: 211 # If answer start offset is contained in passage we assign the label to that passage 212 if (answer["answer_start"] >= s.meta["_split_offset"]) and ( 213 answer["answer_start"] < (s.meta["_split_offset"] + len(s.content)) 214 ): 215 cur_doc = s 216 cur_ans_start = answer["answer_start"] - s.meta["_split_offset"] 217 # If a document is splitting an answer we add the whole answer text to the document 218 if s.content[cur_ans_start : cur_ans_start + len(ans)] != ans: 219 s.content = s.content[:cur_ans_start] + ans 220 break 221 cur_answer = Answer( 222 answer=ans, 223 type="extractive", 224 score=0.0, 225 context=cur_doc.content, 226 offsets_in_document=[Span(start=cur_ans_start, end=cur_ans_start + len(ans))], 227 offsets_in_context=[Span(start=cur_ans_start, end=cur_ans_start + len(ans))], 228 document_ids=[cur_doc.id], 229 ) 230 label = Label( 231 query=qa["question"], 232 answer=cur_answer, 233 document=cur_doc, 234 is_correct_answer=True, 235 is_correct_document=True, 236 origin="gold-label", 237 ) 238 labels.append(label) 239 else: 240 # for no_answer we need to assign each split as not fitting to the question 241 for s in splits: 242 label = Label( 243 query=qa["question"], 244 answer=Answer( 245 answer="", 246 type="extractive", 247 score=0.0, 248 offsets_in_document=[Span(start=0, end=0)], 249 offsets_in_context=[Span(start=0, end=0)], 250 ), 251 document=s, 252 is_correct_answer=True, 253 is_correct_document=True, 254 origin="gold-label", 255 ) 256 257 labels.append(label) 258 259 return docs, labels, problematic_ids 260 261 262 def convert_date_to_rfc3339(date: str) -> str: 263 """ 264 Converts a date to RFC3339 format, as Weaviate requires dates to be in RFC3339 format including the time and 265 timezone. 266 267 If the provided date string does not contain a time and/or timezone, we use 00:00 as default time 268 and UTC as default time zone. 269 270 This method cannot be part of WeaviateDocumentStore, as this would result in a circular import between weaviate.py 271 and filter_utils.py. 272 """ 273 parsed_datetime = datetime.fromisoformat(date) 274 if parsed_datetime.utcoffset() is None: 275 converted_date = parsed_datetime.isoformat() + "Z" 276 else: 277 converted_date = parsed_datetime.isoformat() 278 279 return converted_date 280 [end of haystack/document_stores/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/haystack/document_stores/utils.py b/haystack/document_stores/utils.py --- a/haystack/document_stores/utils.py +++ b/haystack/document_stores/utils.py @@ -172,6 +172,9 @@ ## Assign Labels to corresponding documents for qa in paragraph["qas"]: + meta_qa = { + k: v for k, v in qa.items() if k not in ("is_impossible", "answers", "question", "id", "missing") + } if not qa.get("is_impossible", False): for answer in qa["answers"]: ans = answer["text"] @@ -191,6 +194,7 @@ is_correct_answer=True, is_correct_document=True, origin="gold-label", + meta=meta_qa, ) labels.append(label) else: @@ -234,6 +238,7 @@ is_correct_answer=True, is_correct_document=True, origin="gold-label", + meta=meta_qa, ) labels.append(label) else: @@ -252,6 +257,7 @@ is_correct_answer=True, is_correct_document=True, origin="gold-label", + meta=meta_qa, ) labels.append(label)
{"golden_diff": "diff --git a/haystack/document_stores/utils.py b/haystack/document_stores/utils.py\n--- a/haystack/document_stores/utils.py\n+++ b/haystack/document_stores/utils.py\n@@ -172,6 +172,9 @@\n \n ## Assign Labels to corresponding documents\n for qa in paragraph[\"qas\"]:\n+ meta_qa = {\n+ k: v for k, v in qa.items() if k not in (\"is_impossible\", \"answers\", \"question\", \"id\", \"missing\")\n+ }\n if not qa.get(\"is_impossible\", False):\n for answer in qa[\"answers\"]:\n ans = answer[\"text\"]\n@@ -191,6 +194,7 @@\n is_correct_answer=True,\n is_correct_document=True,\n origin=\"gold-label\",\n+ meta=meta_qa,\n )\n labels.append(label)\n else:\n@@ -234,6 +238,7 @@\n is_correct_answer=True,\n is_correct_document=True,\n origin=\"gold-label\",\n+ meta=meta_qa,\n )\n labels.append(label)\n else:\n@@ -252,6 +257,7 @@\n is_correct_answer=True,\n is_correct_document=True,\n origin=\"gold-label\",\n+ meta=meta_qa,\n )\n \n labels.append(label)\n", "issue": "Load additional fields from SQUAD-format file to meta field for labels\n**Is your feature request related to a problem? Please describe.**\r\nCurrently `eval_data_from_json` loads additional fields for Documents as meta field but it doesn't do it for Labels.\r\n\r\n**Describe the solution you'd like**\r\n`eval_data_from_json` loads additional fields to `Label.meta` dict analogically to how it's done for Documents.\r\n\r\n**Describe alternatives you've considered**\r\nCurrently I'd would have to rewrite whole loading data from json code to achieve this.\r\n\n", "before_files": [{"content": "import typing\nfrom typing import Dict, List, Optional, Tuple, Union, Generator\n\nimport json\nimport logging\nfrom datetime import datetime\n\nfrom haystack.schema import Document, Label, Answer, Span\nfrom haystack.nodes.preprocessor import PreProcessor\n\nif typing.TYPE_CHECKING:\n # This results in a circular import if we don't use typing.TYPE_CHECKING\n from haystack.document_stores.base import BaseDocumentStore\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef eval_data_from_json(\n filename: str,\n max_docs: Optional[Union[int, bool]] = None,\n preprocessor: Optional[PreProcessor] = None,\n open_domain: bool = False,\n) -> Tuple[List[Document], List[Label]]:\n \"\"\"\n Read Documents + Labels from a SQuAD-style file.\n Document and Labels can then be indexed to the DocumentStore and be used for evaluation.\n\n :param filename: Path to file in SQuAD format\n :param max_docs: This sets the number of documents that will be loaded. 
By default, this is set to None, thus reading in all available eval documents.\n :param open_domain: Set this to True if your file is an open domain dataset where two different answers to the same question might be found in different contexts.\n \"\"\"\n docs: List[Document] = []\n labels = []\n problematic_ids = []\n\n with open(filename, \"r\", encoding=\"utf-8\") as file:\n data = json.load(file)\n if \"title\" not in data[\"data\"][0]:\n logger.warning(\"No title information found for documents in QA file: %s\", filename)\n\n for squad_document in data[\"data\"]:\n if max_docs and len(docs) > max_docs:\n break\n # Extracting paragraphs and their labels from a SQuAD document dict\n cur_docs, cur_labels, cur_problematic_ids = _extract_docs_and_labels_from_dict(\n squad_document, preprocessor, open_domain\n )\n docs.extend(cur_docs)\n labels.extend(cur_labels)\n problematic_ids.extend(cur_problematic_ids)\n if len(problematic_ids) > 0:\n logger.warning(\n \"Could not convert an answer for %s questions.\\nThere were conversion errors for question ids: %s\",\n len(problematic_ids),\n problematic_ids,\n )\n return docs, labels\n\n\ndef eval_data_from_jsonl(\n filename: str,\n batch_size: Optional[int] = None,\n max_docs: Optional[Union[int, bool]] = None,\n preprocessor: Optional[PreProcessor] = None,\n open_domain: bool = False,\n) -> Generator[Tuple[List[Document], List[Label]], None, None]:\n \"\"\"\n Read Documents + Labels from a SQuAD-style file in jsonl format, i.e. one document per line.\n Document and Labels can then be indexed to the DocumentStore and be used for evaluation.\n\n This is a generator which will yield one tuple per iteration containing a list\n of batch_size documents and a list with the documents' labels.\n If batch_size is set to None, this method will yield all documents and labels.\n\n :param filename: Path to file in SQuAD format\n :param max_docs: This sets the number of documents that will be loaded. 
By default, this is set to None, thus reading in all available eval documents.\n :param open_domain: Set this to True if your file is an open domain dataset where two different answers to the same question might be found in different contexts.\n \"\"\"\n docs: List[Document] = []\n labels = []\n problematic_ids = []\n\n with open(filename, \"r\", encoding=\"utf-8\") as file:\n for document in file:\n if max_docs and len(docs) > max_docs:\n break\n # Extracting paragraphs and their labels from a SQuAD document dict\n squad_document = json.loads(document)\n cur_docs, cur_labels, cur_problematic_ids = _extract_docs_and_labels_from_dict(\n squad_document, preprocessor, open_domain\n )\n docs.extend(cur_docs)\n labels.extend(cur_labels)\n problematic_ids.extend(cur_problematic_ids)\n\n if batch_size is not None and len(docs) >= batch_size:\n if len(problematic_ids) > 0:\n logger.warning(\n \"Could not convert an answer for %s questions.\\n\"\n \"There were conversion errors for question ids: %s\",\n len(problematic_ids),\n problematic_ids,\n )\n yield docs, labels\n docs = []\n labels = []\n problematic_ids = []\n\n yield docs, labels\n\n\ndef squad_json_to_jsonl(squad_file: str, output_file: str):\n \"\"\"\n Converts a SQuAD-json-file into jsonl format with one document per line.\n\n :param squad_file: SQuAD-file in json format.\n :param output_file: Name of output file (SQuAD in jsonl format)\n \"\"\"\n with open(squad_file, encoding=\"utf-8\") as json_file, open(output_file, \"w\", encoding=\"utf-8\") as jsonl_file:\n squad_json = json.load(json_file)\n\n for doc in squad_json[\"data\"]:\n json.dump(doc, jsonl_file)\n jsonl_file.write(\"\\n\")\n\n\ndef _extract_docs_and_labels_from_dict(\n document_dict: Dict, preprocessor: Optional[PreProcessor] = None, open_domain: bool = False\n):\n \"\"\"\n Set open_domain to True if you are trying to load open_domain labels (i.e. labels without doc id or start idx)\n \"\"\"\n docs = []\n labels = []\n problematic_ids = []\n\n # get all extra fields from document level (e.g. 
title)\n meta_doc = {k: v for k, v in document_dict.items() if k not in (\"paragraphs\", \"title\")}\n for paragraph in document_dict[\"paragraphs\"]:\n ## Create Metadata\n cur_meta = {\"name\": document_dict.get(\"title\", None)}\n # all other fields from paragraph level\n meta_paragraph = {k: v for k, v in paragraph.items() if k not in (\"qas\", \"context\")}\n cur_meta.update(meta_paragraph)\n # meta from parent document\n cur_meta.update(meta_doc)\n\n ## Create Document\n cur_full_doc = Document(content=paragraph[\"context\"], meta=cur_meta)\n if preprocessor is not None:\n splits_docs = preprocessor.process(documents=[cur_full_doc])\n # we need to pull in _split_id into the document id for unique reference in labels\n splits: List[Document] = []\n offset = 0\n for d in splits_docs:\n id = f\"{d.id}-{d.meta['_split_id']}\"\n d.meta[\"_split_offset\"] = offset\n offset += len(d.content)\n # offset correction based on splitting method\n if preprocessor.split_by == \"word\":\n offset += 1\n elif preprocessor.split_by == \"passage\":\n offset += 2\n else:\n raise NotImplementedError\n mydoc = Document(content=d.content, id=id, meta=d.meta)\n splits.append(mydoc)\n else:\n splits = [cur_full_doc]\n docs.extend(splits)\n\n ## Assign Labels to corresponding documents\n for qa in paragraph[\"qas\"]:\n if not qa.get(\"is_impossible\", False):\n for answer in qa[\"answers\"]:\n ans = answer[\"text\"]\n # TODO The following block of code means that answer_start is never calculated\n # and cur_id is always None for open_domain\n # This can be rewritten so that this function could try to calculate offsets\n # and populate id in open_domain mode\n if open_domain:\n # TODO check with Branden why we want to treat open_domain here differently.\n # Shouldn't this be something configured at eval time only?\n cur_ans_start = answer.get(\"answer_start\", 0)\n # cur_id = '0'\n label = Label(\n query=qa[\"question\"],\n answer=Answer(answer=ans, type=\"extractive\", score=0.0),\n document=None, # type: ignore\n is_correct_answer=True,\n is_correct_document=True,\n origin=\"gold-label\",\n )\n labels.append(label)\n else:\n ans_position = cur_full_doc.content[\n answer[\"answer_start\"] : answer[\"answer_start\"] + len(str(ans))\n ]\n if ans != ans_position:\n # do not use answer\n problematic_ids.append(qa.get(\"id\", \"missing\"))\n break\n # find corresponding document or split\n if len(splits) == 1:\n # cur_id = splits[0].id\n cur_ans_start = answer[\"answer_start\"]\n cur_doc = splits[0]\n else:\n for s in splits:\n # If answer start offset is contained in passage we assign the label to that passage\n if (answer[\"answer_start\"] >= s.meta[\"_split_offset\"]) and (\n answer[\"answer_start\"] < (s.meta[\"_split_offset\"] + len(s.content))\n ):\n cur_doc = s\n cur_ans_start = answer[\"answer_start\"] - s.meta[\"_split_offset\"]\n # If a document is splitting an answer we add the whole answer text to the document\n if s.content[cur_ans_start : cur_ans_start + len(ans)] != ans:\n s.content = s.content[:cur_ans_start] + ans\n break\n cur_answer = Answer(\n answer=ans,\n type=\"extractive\",\n score=0.0,\n context=cur_doc.content,\n offsets_in_document=[Span(start=cur_ans_start, end=cur_ans_start + len(ans))],\n offsets_in_context=[Span(start=cur_ans_start, end=cur_ans_start + len(ans))],\n document_ids=[cur_doc.id],\n )\n label = Label(\n query=qa[\"question\"],\n answer=cur_answer,\n document=cur_doc,\n is_correct_answer=True,\n is_correct_document=True,\n origin=\"gold-label\",\n )\n labels.append(label)\n 
else:\n # for no_answer we need to assign each split as not fitting to the question\n for s in splits:\n label = Label(\n query=qa[\"question\"],\n answer=Answer(\n answer=\"\",\n type=\"extractive\",\n score=0.0,\n offsets_in_document=[Span(start=0, end=0)],\n offsets_in_context=[Span(start=0, end=0)],\n ),\n document=s,\n is_correct_answer=True,\n is_correct_document=True,\n origin=\"gold-label\",\n )\n\n labels.append(label)\n\n return docs, labels, problematic_ids\n\n\ndef convert_date_to_rfc3339(date: str) -> str:\n \"\"\"\n Converts a date to RFC3339 format, as Weaviate requires dates to be in RFC3339 format including the time and\n timezone.\n\n If the provided date string does not contain a time and/or timezone, we use 00:00 as default time\n and UTC as default time zone.\n\n This method cannot be part of WeaviateDocumentStore, as this would result in a circular import between weaviate.py\n and filter_utils.py.\n \"\"\"\n parsed_datetime = datetime.fromisoformat(date)\n if parsed_datetime.utcoffset() is None:\n converted_date = parsed_datetime.isoformat() + \"Z\"\n else:\n converted_date = parsed_datetime.isoformat()\n\n return converted_date\n", "path": "haystack/document_stores/utils.py"}]}
3,848
295
gh_patches_debug_64706
rasdani/github-patches
git_diff
ansible__ansible-modules-extras-3417
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ec2_lc_find not returning AssociatePublicIpAddress ##### ISSUE TYPE Bug Report ##### COMPONENT NAME ec2_lc_find ##### ANSIBLE VERSION ``` ansible 2.2.0.0 config file = /home/centos/ansiblebase/ansible.cfg configured module search path = Default w/o overrides ``` ##### CONFIGURATION No significant changes ##### OS / ENVIRONMENT Started with Ansible Tower 3.0.3 on CentOS 7 x86_64 Did a yum update on ansible to 2.2. Did pip install boto3. ##### SUMMARY Running ec2_lc_find fails with a missing key for AssociatePublicIpAddress ##### STEPS TO REPRODUCE ``` - ec2_lc_find: region: "{{ region }}" name_regex: lc_name-*" sort_order: ascending limit: 3 register: old_lc_result ``` <!--- You can also paste gist.github.com links for larger files --> ##### EXPECTED RESULTS Correctly returns load configurations matching regex. ##### ACTUAL RESULTS ``` An exception occurred during task execution. To see the full traceback, use -vvv. The error was: KeyError: 'AssociatePublicIpAddress' fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "module_stderr": "Traceback (most recent call last):\n File \"/tmp/ansible_dJ3ho0/ansible_module_ec2_lc_find.py\", line 225, in <module>\n main()\n File \"/tmp/ansible_dJ3ho0/ansible_module_ec2_lc_find.py\", line 217, in main\n find_launch_configs(client, module)\n File \"/tmp/ansible_dJ3ho0/ansible_module_ec2_lc_find.py\", line 187, in find_launch_configs\n 'associate_public_address': lc['AssociatePublicIpAddress'],\nKeyError: 'AssociatePublicIpAddress'\n", "module_stdout": "", "msg": "MODULE FAILURE"} ``` </issue> <code> [start of cloud/amazon/ec2_lc_find.py] 1 #!/usr/bin/python 2 # encoding: utf-8 3 4 # (c) 2015, Jose Armesto <jose@armesto.net> 5 # 6 # This file is part of Ansible 7 # 8 # This module is free software: you can redistribute it and/or modify 9 # it under the terms of the GNU General Public License as published by 10 # the Free Software Foundation, either version 3 of the License, or 11 # (at your option) any later version. 12 # 13 # This software is distributed in the hope that it will be useful, 14 # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 # GNU General Public License for more details. 17 # 18 # You should have received a copy of the GNU General Public License 19 # along with this software. If not, see <http://www.gnu.org/licenses/>. 20 21 DOCUMENTATION = """ 22 --- 23 module: ec2_lc_find 24 short_description: Find AWS Autoscaling Launch Configurations 25 description: 26 - Returns list of matching Launch Configurations for a given name, along with other useful information 27 - Results can be sorted and sliced 28 - It depends on boto 29 - Based on the work by Tom Bamford (https://github.com/tombamford) 30 31 version_added: "2.2" 32 author: "Jose Armesto (@fiunchinho)" 33 options: 34 region: 35 description: 36 - The AWS region to use. 37 required: true 38 aliases: ['aws_region', 'ec2_region'] 39 name_regex: 40 description: 41 - A Launch Configuration to match 42 - It'll be compiled as regex 43 required: True 44 sort_order: 45 description: 46 - Order in which to sort results. 47 choices: ['ascending', 'descending'] 48 default: 'ascending' 49 required: false 50 limit: 51 description: 52 - How many results to show. 53 - Corresponds to Python slice notation like list[:limit]. 
54 default: null 55 required: false 56 requirements: 57 - "python >= 2.6" 58 - boto3 59 """ 60 61 EXAMPLES = ''' 62 # Note: These examples do not set authentication details, see the AWS Guide for details. 63 64 # Search for the Launch Configurations that start with "app" 65 - ec2_lc_find: 66 name_regex: app.* 67 sort_order: descending 68 limit: 2 69 ''' 70 71 RETURN = ''' 72 image_id: 73 description: AMI id 74 returned: when Launch Configuration was found 75 type: string 76 sample: "ami-0d75df7e" 77 user_data: 78 description: User data used to start instance 79 returned: when Launch Configuration was found 80 type: string 81 user_data: "ZXhwb3J0IENMT1VE" 82 name: 83 description: Name of the AMI 84 returned: when Launch Configuration was found 85 type: string 86 sample: "myapp-v123" 87 arn: 88 description: Name of the AMI 89 returned: when Launch Configuration was found 90 type: string 91 sample: "arn:aws:autoscaling:eu-west-1:12345:launchConfiguration:d82f050e-e315:launchConfigurationName/yourproject" 92 instance_type: 93 description: Type of ec2 instance 94 returned: when Launch Configuration was found 95 type: string 96 sample: "t2.small" 97 created_time: 98 description: When it was created 99 returned: when Launch Configuration was found 100 type: string 101 sample: "2016-06-29T14:59:22.222000+00:00" 102 ebs_optimized: 103 description: Launch Configuration EBS optimized property 104 returned: when Launch Configuration was found 105 type: boolean 106 sample: False 107 instance_monitoring: 108 description: Launch Configuration instance monitoring property 109 returned: when Launch Configuration was found 110 type: string 111 sample: {"Enabled": false} 112 classic_link_vpc_security_groups: 113 description: Launch Configuration classic link vpc security groups property 114 returned: when Launch Configuration was found 115 type: list 116 sample: [] 117 block_device_mappings: 118 description: Launch Configuration block device mappings property 119 returned: when Launch Configuration was found 120 type: list 121 sample: [] 122 keyname: 123 description: Launch Configuration ssh key 124 returned: when Launch Configuration was found 125 type: string 126 sample: mykey 127 security_groups: 128 description: Launch Configuration security groups 129 returned: when Launch Configuration was found 130 type: list 131 sample: [] 132 kernel_id: 133 description: Launch Configuration kernel to use 134 returned: when Launch Configuration was found 135 type: string 136 sample: '' 137 ram_disk_id: 138 description: Launch Configuration ram disk property 139 returned: when Launch Configuration was found 140 type: string 141 sample: '' 142 associate_public_address: 143 description: Assign public address or not 144 returned: when Launch Configuration was found 145 type: boolean 146 sample: True 147 ... 
148 ''' 149 150 151 def find_launch_configs(client, module): 152 name_regex = module.params.get('name_regex') 153 sort_order = module.params.get('sort_order') 154 limit = module.params.get('limit') 155 156 paginator = client.get_paginator('describe_launch_configurations') 157 158 response_iterator = paginator.paginate( 159 PaginationConfig={ 160 'MaxItems': 1000, 161 'PageSize': 100 162 } 163 ) 164 165 results = [] 166 167 for response in response_iterator: 168 response['LaunchConfigurations'] = filter(lambda lc: re.compile(name_regex).match(lc['LaunchConfigurationName']), 169 response['LaunchConfigurations']) 170 171 for lc in response['LaunchConfigurations']: 172 data = { 173 'name': lc['LaunchConfigurationName'], 174 'arn': lc['LaunchConfigurationARN'], 175 'created_time': lc['CreatedTime'], 176 'user_data': lc['UserData'], 177 'instance_type': lc['InstanceType'], 178 'image_id': lc['ImageId'], 179 'ebs_optimized': lc['EbsOptimized'], 180 'instance_monitoring': lc['InstanceMonitoring'], 181 'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'], 182 'block_device_mappings': lc['BlockDeviceMappings'], 183 'keyname': lc['KeyName'], 184 'security_groups': lc['SecurityGroups'], 185 'kernel_id': lc['KernelId'], 186 'ram_disk_id': lc['RamdiskId'], 187 'associate_public_address': lc['AssociatePublicIpAddress'], 188 } 189 190 results.append(data) 191 192 results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending')) 193 194 if limit: 195 results = results[:int(limit)] 196 197 module.exit_json(changed=False, results=results) 198 199 200 def main(): 201 argument_spec = ec2_argument_spec() 202 argument_spec.update(dict( 203 region=dict(required=True, aliases=['aws_region', 'ec2_region']), 204 name_regex=dict(required=True), 205 sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']), 206 limit=dict(required=False, type='int'), 207 ) 208 ) 209 210 module = AnsibleModule( 211 argument_spec=argument_spec, 212 ) 213 214 region, ec2_url, aws_connect_params = get_aws_connection_info(module, True) 215 216 client = boto3_conn(module=module, conn_type='client', resource='autoscaling', region=region, **aws_connect_params) 217 find_launch_configs(client, module) 218 219 220 # import module snippets 221 from ansible.module_utils.basic import * 222 from ansible.module_utils.ec2 import * 223 224 if __name__ == '__main__': 225 main() 226 [end of cloud/amazon/ec2_lc_find.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cloud/amazon/ec2_lc_find.py b/cloud/amazon/ec2_lc_find.py --- a/cloud/amazon/ec2_lc_find.py +++ b/cloud/amazon/ec2_lc_find.py @@ -184,7 +184,7 @@ 'security_groups': lc['SecurityGroups'], 'kernel_id': lc['KernelId'], 'ram_disk_id': lc['RamdiskId'], - 'associate_public_address': lc['AssociatePublicIpAddress'], + 'associate_public_address': lc.get('AssociatePublicIpAddress', False), } results.append(data)
{"golden_diff": "diff --git a/cloud/amazon/ec2_lc_find.py b/cloud/amazon/ec2_lc_find.py\n--- a/cloud/amazon/ec2_lc_find.py\n+++ b/cloud/amazon/ec2_lc_find.py\n@@ -184,7 +184,7 @@\n 'security_groups': lc['SecurityGroups'],\n 'kernel_id': lc['KernelId'],\n 'ram_disk_id': lc['RamdiskId'],\n- 'associate_public_address': lc['AssociatePublicIpAddress'],\n+ 'associate_public_address': lc.get('AssociatePublicIpAddress', False),\n }\n \n results.append(data)\n", "issue": "ec2_lc_find not returning AssociatePublicIpAddress\n##### ISSUE TYPE\r\nBug Report\r\n\r\n##### COMPONENT NAME\r\nec2_lc_find\r\n\r\n##### ANSIBLE VERSION\r\n```\r\nansible 2.2.0.0\r\n config file = /home/centos/ansiblebase/ansible.cfg\r\n configured module search path = Default w/o overrides\r\n```\r\n\r\n##### CONFIGURATION\r\nNo significant changes\r\n\r\n##### OS / ENVIRONMENT\r\nStarted with Ansible Tower 3.0.3 on CentOS 7 x86_64\r\nDid a yum update on ansible to 2.2.\r\nDid pip install boto3.\r\n\r\n##### SUMMARY\r\nRunning ec2_lc_find fails with a missing key for AssociatePublicIpAddress\r\n\r\n##### STEPS TO REPRODUCE\r\n```\r\n- ec2_lc_find:\r\n region: \"{{ region }}\"\r\n name_regex: lc_name-*\"\r\n sort_order: ascending\r\n limit: 3\r\n register: old_lc_result\r\n```\r\n\r\n<!--- You can also paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\nCorrectly returns load configurations matching regex.\r\n\r\n##### ACTUAL RESULTS\r\n```\r\nAn exception occurred during task execution. To see the full traceback, use -vvv. The error was: KeyError: 'AssociatePublicIpAddress'\r\nfatal: [localhost]: FAILED! => {\"changed\": false, \"failed\": true, \"module_stderr\": \"Traceback (most recent call last):\\n File \\\"/tmp/ansible_dJ3ho0/ansible_module_ec2_lc_find.py\\\", line 225, in <module>\\n main()\\n File \\\"/tmp/ansible_dJ3ho0/ansible_module_ec2_lc_find.py\\\", line 217, in main\\n find_launch_configs(client, module)\\n File \\\"/tmp/ansible_dJ3ho0/ansible_module_ec2_lc_find.py\\\", line 187, in find_launch_configs\\n 'associate_public_address': lc['AssociatePublicIpAddress'],\\nKeyError: 'AssociatePublicIpAddress'\\n\", \"module_stdout\": \"\", \"msg\": \"MODULE FAILURE\"}\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# encoding: utf-8\n\n# (c) 2015, Jose Armesto <jose@armesto.net>\n#\n# This file is part of Ansible\n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software. 
If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = \"\"\"\n---\nmodule: ec2_lc_find\nshort_description: Find AWS Autoscaling Launch Configurations\ndescription:\n - Returns list of matching Launch Configurations for a given name, along with other useful information\n - Results can be sorted and sliced\n - It depends on boto\n - Based on the work by Tom Bamford (https://github.com/tombamford)\n\nversion_added: \"2.2\"\nauthor: \"Jose Armesto (@fiunchinho)\"\noptions:\n region:\n description:\n - The AWS region to use.\n required: true\n aliases: ['aws_region', 'ec2_region']\n name_regex:\n description:\n - A Launch Configuration to match\n - It'll be compiled as regex\n required: True\n sort_order:\n description:\n - Order in which to sort results.\n choices: ['ascending', 'descending']\n default: 'ascending'\n required: false\n limit:\n description:\n - How many results to show.\n - Corresponds to Python slice notation like list[:limit].\n default: null\n required: false\nrequirements:\n - \"python >= 2.6\"\n - boto3\n\"\"\"\n\nEXAMPLES = '''\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n\n# Search for the Launch Configurations that start with \"app\"\n- ec2_lc_find:\n name_regex: app.*\n sort_order: descending\n limit: 2\n'''\n\nRETURN = '''\nimage_id:\n description: AMI id\n returned: when Launch Configuration was found\n type: string\n sample: \"ami-0d75df7e\"\nuser_data:\n description: User data used to start instance\n returned: when Launch Configuration was found\n type: string\n user_data: \"ZXhwb3J0IENMT1VE\"\nname:\n description: Name of the AMI\n returned: when Launch Configuration was found\n type: string\n sample: \"myapp-v123\"\narn:\n description: Name of the AMI\n returned: when Launch Configuration was found\n type: string\n sample: \"arn:aws:autoscaling:eu-west-1:12345:launchConfiguration:d82f050e-e315:launchConfigurationName/yourproject\"\ninstance_type:\n description: Type of ec2 instance\n returned: when Launch Configuration was found\n type: string\n sample: \"t2.small\"\ncreated_time:\n description: When it was created\n returned: when Launch Configuration was found\n type: string\n sample: \"2016-06-29T14:59:22.222000+00:00\"\nebs_optimized:\n description: Launch Configuration EBS optimized property\n returned: when Launch Configuration was found\n type: boolean\n sample: False\ninstance_monitoring:\n description: Launch Configuration instance monitoring property\n returned: when Launch Configuration was found\n type: string\n sample: {\"Enabled\": false}\nclassic_link_vpc_security_groups:\n description: Launch Configuration classic link vpc security groups property\n returned: when Launch Configuration was found\n type: list\n sample: []\nblock_device_mappings:\n description: Launch Configuration block device mappings property\n returned: when Launch Configuration was found\n type: list\n sample: []\nkeyname:\n description: Launch Configuration ssh key\n returned: when Launch Configuration was found\n type: string\n sample: mykey\nsecurity_groups:\n description: Launch Configuration security groups\n returned: when Launch Configuration was found\n type: list\n sample: []\nkernel_id:\n description: Launch Configuration kernel to use\n returned: when Launch Configuration was found\n type: string\n sample: ''\nram_disk_id:\n description: Launch Configuration ram disk property\n returned: when Launch Configuration was found\n type: string\n sample: ''\nassociate_public_address:\n description: Assign public address 
or not\n returned: when Launch Configuration was found\n type: boolean\n sample: True\n...\n'''\n\n\ndef find_launch_configs(client, module):\n name_regex = module.params.get('name_regex')\n sort_order = module.params.get('sort_order')\n limit = module.params.get('limit')\n\n paginator = client.get_paginator('describe_launch_configurations')\n\n response_iterator = paginator.paginate(\n PaginationConfig={\n 'MaxItems': 1000,\n 'PageSize': 100\n }\n )\n\n results = []\n\n for response in response_iterator:\n response['LaunchConfigurations'] = filter(lambda lc: re.compile(name_regex).match(lc['LaunchConfigurationName']),\n response['LaunchConfigurations'])\n\n for lc in response['LaunchConfigurations']:\n data = {\n 'name': lc['LaunchConfigurationName'],\n 'arn': lc['LaunchConfigurationARN'],\n 'created_time': lc['CreatedTime'],\n 'user_data': lc['UserData'],\n 'instance_type': lc['InstanceType'],\n 'image_id': lc['ImageId'],\n 'ebs_optimized': lc['EbsOptimized'],\n 'instance_monitoring': lc['InstanceMonitoring'],\n 'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'],\n 'block_device_mappings': lc['BlockDeviceMappings'],\n 'keyname': lc['KeyName'],\n 'security_groups': lc['SecurityGroups'],\n 'kernel_id': lc['KernelId'],\n 'ram_disk_id': lc['RamdiskId'],\n 'associate_public_address': lc['AssociatePublicIpAddress'],\n }\n\n results.append(data)\n\n results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending'))\n\n if limit:\n results = results[:int(limit)]\n\n module.exit_json(changed=False, results=results)\n\n\ndef main():\n argument_spec = ec2_argument_spec()\n argument_spec.update(dict(\n region=dict(required=True, aliases=['aws_region', 'ec2_region']),\n name_regex=dict(required=True),\n sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']),\n limit=dict(required=False, type='int'),\n )\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n )\n\n region, ec2_url, aws_connect_params = get_aws_connection_info(module, True)\n\n client = boto3_conn(module=module, conn_type='client', resource='autoscaling', region=region, **aws_connect_params)\n find_launch_configs(client, module)\n\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.ec2 import *\n\nif __name__ == '__main__':\n main()\n", "path": "cloud/amazon/ec2_lc_find.py"}]}
3,194
127
gh_patches_debug_40775
rasdani/github-patches
git_diff
streamlink__streamlink-3662
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> plugins.bfmtv: No playable streams found Hello. for few days, the plugin isn't working anymore /usr/local/bin/streamlink --loglevel debug https://rmcdecouverte.bfmtv.com/mediaplayer-direct/ best [cli][info] streamlink is running as root! Be careful! [cli][debug] OS: Linux-5.8.0-44-generic-x86_64-with-glibc2.29 [cli][debug] Python: 3.8.5 [cli][debug] Streamlink: 2.1.1 [cli][debug] Requests(2.22.0), Socks(1.7.1), Websocket(0.58.0) [cli][debug] Arguments: [cli][debug] url=https://rmcdecouverte.bfmtv.com/mediaplayer-direct/ [cli][debug] stream=['best'] [cli][debug] --loglevel=debug [cli][info] Found matching plugin bfmtv for URL https://rmcdecouverte.bfmtv.com/mediaplayer-direct/ error: No playable streams found on this URL: https://rmcdecouverte.bfmtv.com/mediaplayer-direct/ </issue> <code> [start of src/streamlink/plugins/bfmtv.py] 1 import logging 2 import re 3 4 from streamlink.plugin import Plugin 5 from streamlink.plugins.brightcove import BrightcovePlayer 6 7 log = logging.getLogger(__name__) 8 9 10 class BFMTV(Plugin): 11 _url_re = re.compile(r'https://.+\.(?:bfmtv|01net)\.com') 12 _dailymotion_url = 'https://www.dailymotion.com/embed/video/{}' 13 _brightcove_video_re = re.compile( 14 r'accountid="(?P<account_id>[0-9]+).*?videoid="(?P<video_id>[0-9]+)"', 15 re.DOTALL 16 ) 17 _brightcove_video_alt_re = re.compile( 18 r'data-account="(?P<account_id>[0-9]+).*?data-video-id="(?P<video_id>[0-9]+)"', 19 re.DOTALL 20 ) 21 _embed_video_id_re = re.compile( 22 r'<iframe.*?src=".*?/(?P<video_id>\w+)"', 23 re.DOTALL 24 ) 25 26 @classmethod 27 def can_handle_url(cls, url): 28 return cls._url_re.match(url) is not None 29 30 def _get_streams(self): 31 # Retrieve URL page and search for Brightcove video data 32 res = self.session.http.get(self.url) 33 match = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text) 34 if match is not None: 35 account_id = match.group('account_id') 36 log.debug(f'Account ID: {account_id}') 37 video_id = match.group('video_id') 38 log.debug(f'Video ID: {video_id}') 39 player = BrightcovePlayer(self.session, account_id) 40 yield from player.get_streams(video_id) 41 else: 42 # Try to find the Dailymotion video ID 43 match = self._embed_video_id_re.search(res.text) 44 if match is not None: 45 video_id = match.group('video_id') 46 log.debug(f'Video ID: {video_id}') 47 yield from self.session.streams(self._dailymotion_url.format(video_id)).items() 48 49 50 __plugin__ = BFMTV 51 [end of src/streamlink/plugins/bfmtv.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/streamlink/plugins/bfmtv.py b/src/streamlink/plugins/bfmtv.py --- a/src/streamlink/plugins/bfmtv.py +++ b/src/streamlink/plugins/bfmtv.py @@ -1,8 +1,11 @@ import logging import re +from urllib.parse import urljoin, urlparse from streamlink.plugin import Plugin +from streamlink.plugin.api.utils import itertags from streamlink.plugins.brightcove import BrightcovePlayer +from streamlink.stream import HTTPStream log = logging.getLogger(__name__) @@ -22,29 +25,68 @@ r'<iframe.*?src=".*?/(?P<video_id>\w+)"', re.DOTALL ) + _main_js_url_re = re.compile(r'src="([\w/]+/main\.\w+\.js)"') + _js_brightcove_video_re = re.compile( + r'i\?\([A-Z]="[^"]+",y="(?P<video_id>[0-9]+).*"data-account"\s*:\s*"(?P<account_id>[0-9]+)', + ) @classmethod def can_handle_url(cls, url): return cls._url_re.match(url) is not None def _get_streams(self): - # Retrieve URL page and search for Brightcove video data res = self.session.http.get(self.url) - match = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text) - if match is not None: - account_id = match.group('account_id') + + m = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text) + if m: + account_id = m.group('account_id') log.debug(f'Account ID: {account_id}') - video_id = match.group('video_id') + video_id = m.group('video_id') log.debug(f'Video ID: {video_id}') player = BrightcovePlayer(self.session, account_id) yield from player.get_streams(video_id) - else: - # Try to find the Dailymotion video ID - match = self._embed_video_id_re.search(res.text) - if match is not None: - video_id = match.group('video_id') + return + + # Try to find the Dailymotion video ID + m = self._embed_video_id_re.search(res.text) + if m: + video_id = m.group('video_id') + log.debug(f'Video ID: {video_id}') + yield from self.session.streams(self._dailymotion_url.format(video_id)).items() + return + + # Try the JS for Brightcove video data + m = self._main_js_url_re.search(res.text) + if m: + log.debug(f'JS URL: {urljoin(self.url, m.group(1))}') + res = self.session.http.get(urljoin(self.url, m.group(1))) + m = self._js_brightcove_video_re.search(res.text) + if m: + account_id = m.group('account_id') + log.debug(f'Account ID: {account_id}') + video_id = m.group('video_id') log.debug(f'Video ID: {video_id}') - yield from self.session.streams(self._dailymotion_url.format(video_id)).items() + player = BrightcovePlayer(self.session, account_id) + yield from player.get_streams(video_id) + return + + # Audio Live + audio_url = None + for source in itertags(res.text, 'source'): + url = source.attributes.get('src') + if url: + p_url = urlparse(url) + if p_url.path.endswith(('.mp3')): + audio_url = url + + # Audio VOD + for div in itertags(res.text, 'div'): + if div.attributes.get('class') == 'audio-player': + audio_url = div.attributes.get('data-media-url') + + if audio_url: + yield 'audio', HTTPStream(self.session, audio_url) + return __plugin__ = BFMTV
{"golden_diff": "diff --git a/src/streamlink/plugins/bfmtv.py b/src/streamlink/plugins/bfmtv.py\n--- a/src/streamlink/plugins/bfmtv.py\n+++ b/src/streamlink/plugins/bfmtv.py\n@@ -1,8 +1,11 @@\n import logging\n import re\n+from urllib.parse import urljoin, urlparse\n \n from streamlink.plugin import Plugin\n+from streamlink.plugin.api.utils import itertags\n from streamlink.plugins.brightcove import BrightcovePlayer\n+from streamlink.stream import HTTPStream\n \n log = logging.getLogger(__name__)\n \n@@ -22,29 +25,68 @@\n r'<iframe.*?src=\".*?/(?P<video_id>\\w+)\"',\n re.DOTALL\n )\n+ _main_js_url_re = re.compile(r'src=\"([\\w/]+/main\\.\\w+\\.js)\"')\n+ _js_brightcove_video_re = re.compile(\n+ r'i\\?\\([A-Z]=\"[^\"]+\",y=\"(?P<video_id>[0-9]+).*\"data-account\"\\s*:\\s*\"(?P<account_id>[0-9]+)',\n+ )\n \n @classmethod\n def can_handle_url(cls, url):\n return cls._url_re.match(url) is not None\n \n def _get_streams(self):\n- # Retrieve URL page and search for Brightcove video data\n res = self.session.http.get(self.url)\n- match = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text)\n- if match is not None:\n- account_id = match.group('account_id')\n+\n+ m = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text)\n+ if m:\n+ account_id = m.group('account_id')\n log.debug(f'Account ID: {account_id}')\n- video_id = match.group('video_id')\n+ video_id = m.group('video_id')\n log.debug(f'Video ID: {video_id}')\n player = BrightcovePlayer(self.session, account_id)\n yield from player.get_streams(video_id)\n- else:\n- # Try to find the Dailymotion video ID\n- match = self._embed_video_id_re.search(res.text)\n- if match is not None:\n- video_id = match.group('video_id')\n+ return\n+\n+ # Try to find the Dailymotion video ID\n+ m = self._embed_video_id_re.search(res.text)\n+ if m:\n+ video_id = m.group('video_id')\n+ log.debug(f'Video ID: {video_id}')\n+ yield from self.session.streams(self._dailymotion_url.format(video_id)).items()\n+ return\n+\n+ # Try the JS for Brightcove video data\n+ m = self._main_js_url_re.search(res.text)\n+ if m:\n+ log.debug(f'JS URL: {urljoin(self.url, m.group(1))}')\n+ res = self.session.http.get(urljoin(self.url, m.group(1)))\n+ m = self._js_brightcove_video_re.search(res.text)\n+ if m:\n+ account_id = m.group('account_id')\n+ log.debug(f'Account ID: {account_id}')\n+ video_id = m.group('video_id')\n log.debug(f'Video ID: {video_id}')\n- yield from self.session.streams(self._dailymotion_url.format(video_id)).items()\n+ player = BrightcovePlayer(self.session, account_id)\n+ yield from player.get_streams(video_id)\n+ return\n+\n+ # Audio Live\n+ audio_url = None\n+ for source in itertags(res.text, 'source'):\n+ url = source.attributes.get('src')\n+ if url:\n+ p_url = urlparse(url)\n+ if p_url.path.endswith(('.mp3')):\n+ audio_url = url\n+\n+ # Audio VOD\n+ for div in itertags(res.text, 'div'):\n+ if div.attributes.get('class') == 'audio-player':\n+ audio_url = div.attributes.get('data-media-url')\n+\n+ if audio_url:\n+ yield 'audio', HTTPStream(self.session, audio_url)\n+ return\n \n \n __plugin__ = BFMTV\n", "issue": "plugins.bfmtv: No playable streams found\n Hello. for few days, the plugin isn't working anymore\r\n\r\n\r\n/usr/local/bin/streamlink --loglevel debug https://rmcdecouverte.bfmtv.com/mediaplayer-direct/ best\r\n[cli][info] streamlink is running as root! 
Be careful!\r\n[cli][debug] OS: Linux-5.8.0-44-generic-x86_64-with-glibc2.29\r\n[cli][debug] Python: 3.8.5\r\n[cli][debug] Streamlink: 2.1.1\r\n[cli][debug] Requests(2.22.0), Socks(1.7.1), Websocket(0.58.0)\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://rmcdecouverte.bfmtv.com/mediaplayer-direct/\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin bfmtv for URL https://rmcdecouverte.bfmtv.com/mediaplayer-direct/\r\nerror: No playable streams found on this URL: https://rmcdecouverte.bfmtv.com/mediaplayer-direct/\n", "before_files": [{"content": "import logging\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugins.brightcove import BrightcovePlayer\n\nlog = logging.getLogger(__name__)\n\n\nclass BFMTV(Plugin):\n _url_re = re.compile(r'https://.+\\.(?:bfmtv|01net)\\.com')\n _dailymotion_url = 'https://www.dailymotion.com/embed/video/{}'\n _brightcove_video_re = re.compile(\n r'accountid=\"(?P<account_id>[0-9]+).*?videoid=\"(?P<video_id>[0-9]+)\"',\n re.DOTALL\n )\n _brightcove_video_alt_re = re.compile(\n r'data-account=\"(?P<account_id>[0-9]+).*?data-video-id=\"(?P<video_id>[0-9]+)\"',\n re.DOTALL\n )\n _embed_video_id_re = re.compile(\n r'<iframe.*?src=\".*?/(?P<video_id>\\w+)\"',\n re.DOTALL\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls._url_re.match(url) is not None\n\n def _get_streams(self):\n # Retrieve URL page and search for Brightcove video data\n res = self.session.http.get(self.url)\n match = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text)\n if match is not None:\n account_id = match.group('account_id')\n log.debug(f'Account ID: {account_id}')\n video_id = match.group('video_id')\n log.debug(f'Video ID: {video_id}')\n player = BrightcovePlayer(self.session, account_id)\n yield from player.get_streams(video_id)\n else:\n # Try to find the Dailymotion video ID\n match = self._embed_video_id_re.search(res.text)\n if match is not None:\n video_id = match.group('video_id')\n log.debug(f'Video ID: {video_id}')\n yield from self.session.streams(self._dailymotion_url.format(video_id)).items()\n\n\n__plugin__ = BFMTV\n", "path": "src/streamlink/plugins/bfmtv.py"}]}
1,390
955
gh_patches_debug_486
rasdani/github-patches
git_diff
DDMAL__CantusDB-228
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove the "Users Online" section in footer. </issue> <code> [start of django/cantusdb_project/main_app/templatetags/helper_tags.py] 1 import calendar 2 from typing import Union, Optional 3 from django.utils.http import urlencode 4 from django import template 5 from main_app.models import Source 6 from django.utils.safestring import mark_safe 7 8 register = template.Library() 9 10 11 @register.filter(name="month_to_string") 12 def month_to_string(value: Optional[Union[str, int]]) -> Optional[Union[str, int]]: 13 """Converts month number to textual representation, 3 letters (Jan, Mar, etc)""" 14 if type(value) == int and value in range(1, 13): 15 return calendar.month_abbr[value] 16 else: 17 return value 18 19 20 @register.simple_tag(takes_context=True) 21 def url_add_get_params(context, **kwargs): 22 query = context["request"].GET.copy() 23 query.pop("page", None) 24 query.update(kwargs) 25 return query.urlencode() 26 27 28 @register.simple_tag(takes_context=False) 29 def source_links(): 30 sources = ( 31 Source.objects.filter(public=True, visible=True, segment__id=4063) 32 .exclude(siglum=None) 33 .values("siglum", "id") 34 .order_by("siglum") 35 ) 36 options = "" 37 # <option value="source1">Source 1</option> 38 # <option value="source2">Source 2</option> 39 # <option value="source3">Source 3</option> 40 for source in sources: 41 option_str = ( 42 f"<option value=source/{source['id']}>{source['siglum']}</option>\n" 43 ) 44 options += option_str 45 46 return mark_safe(options) 47 [end of django/cantusdb_project/main_app/templatetags/helper_tags.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/django/cantusdb_project/main_app/templatetags/helper_tags.py b/django/cantusdb_project/main_app/templatetags/helper_tags.py --- a/django/cantusdb_project/main_app/templatetags/helper_tags.py +++ b/django/cantusdb_project/main_app/templatetags/helper_tags.py @@ -44,3 +44,7 @@ options += option_str return mark_safe(options) + +@register.filter(name='has_group') +def has_group(user, group_name): + return user.groups.filter(name=group_name).exists()
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/templatetags/helper_tags.py b/django/cantusdb_project/main_app/templatetags/helper_tags.py\n--- a/django/cantusdb_project/main_app/templatetags/helper_tags.py\n+++ b/django/cantusdb_project/main_app/templatetags/helper_tags.py\n@@ -44,3 +44,7 @@\n options += option_str\n \n return mark_safe(options)\n+\n+@register.filter(name='has_group') \n+def has_group(user, group_name):\n+ return user.groups.filter(name=group_name).exists()\n", "issue": "Remove the \"Users Online\" section in footer.\n\n", "before_files": [{"content": "import calendar\nfrom typing import Union, Optional\nfrom django.utils.http import urlencode\nfrom django import template\nfrom main_app.models import Source\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n\n@register.filter(name=\"month_to_string\")\ndef month_to_string(value: Optional[Union[str, int]]) -> Optional[Union[str, int]]:\n \"\"\"Converts month number to textual representation, 3 letters (Jan, Mar, etc)\"\"\"\n if type(value) == int and value in range(1, 13):\n return calendar.month_abbr[value]\n else:\n return value\n\n\n@register.simple_tag(takes_context=True)\ndef url_add_get_params(context, **kwargs):\n query = context[\"request\"].GET.copy()\n query.pop(\"page\", None)\n query.update(kwargs)\n return query.urlencode()\n\n\n@register.simple_tag(takes_context=False)\ndef source_links():\n sources = (\n Source.objects.filter(public=True, visible=True, segment__id=4063)\n .exclude(siglum=None)\n .values(\"siglum\", \"id\")\n .order_by(\"siglum\")\n )\n options = \"\"\n # <option value=\"source1\">Source 1</option>\n # <option value=\"source2\">Source 2</option>\n # <option value=\"source3\">Source 3</option>\n for source in sources:\n option_str = (\n f\"<option value=source/{source['id']}>{source['siglum']}</option>\\n\"\n )\n options += option_str\n\n return mark_safe(options)\n", "path": "django/cantusdb_project/main_app/templatetags/helper_tags.py"}]}
999
137
gh_patches_debug_13979
rasdani/github-patches
git_diff
facebookresearch__fairscale-975
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> the main branch is not compatible with python 3.6, but setup.py only requires ">=3.6" python 3.6 can pip install latest fairscale https://github.com/facebookresearch/fairscale/blob/1bc96fa8c69def6d990e42bfbd75f86146ce29bd/setup.py#L67 but, some code is not compatible with python 3.6 https://github.com/facebookresearch/fairscale/blob/1bc96fa8c69def6d990e42bfbd75f86146ce29bd/fairscale/experimental/nn/ssd_offload.py#L6 and python<3.7 has no dataclasses https://github.com/facebookresearch/fairscale/blob/1bc96fa8c69def6d990e42bfbd75f86146ce29bd/fairscale/nn/data_parallel/fully_sharded_data_parallel.py#L8 </issue> <code> [start of setup.py] 1 #!/usr/bin/env python3 2 3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 4 # 5 # This source code is licensed under the BSD license found in the 6 # LICENSE file in the root directory of this source tree. 7 8 import os 9 import re 10 11 import setuptools 12 13 this_dir = os.path.dirname(os.path.abspath(__file__)) 14 15 16 def fetch_requirements(): 17 with open("requirements.txt") as f: 18 reqs = f.read().strip().split("\n") 19 return reqs 20 21 22 # https://packaging.python.org/guides/single-sourcing-package-version/ 23 def find_version(version_file_path) -> str: 24 with open(version_file_path) as version_file: 25 version_match = re.search(r"^__version_tuple__ = (.*)", version_file.read(), re.M) 26 if version_match: 27 ver_tup = eval(version_match.group(1)) 28 ver_str = ".".join([str(x) for x in ver_tup]) 29 return ver_str 30 raise RuntimeError("Unable to find version tuple.") 31 32 33 extensions = [] 34 cmdclass = {} 35 36 if os.getenv("BUILD_CUDA_EXTENSIONS", "0") == "1": 37 from torch.utils.cpp_extension import BuildExtension, CUDAExtension 38 39 extensions.extend( 40 [ 41 CUDAExtension( 42 name="fairscale.fused_adam_cuda", 43 include_dirs=[os.path.join(this_dir, "fairscale/clib/fused_adam_cuda")], 44 sources=[ 45 "fairscale/clib/fused_adam_cuda/fused_adam_cuda.cpp", 46 "fairscale/clib/fused_adam_cuda/fused_adam_cuda_kernel.cu", 47 ], 48 extra_compile_args={"cxx": ["-O3"], "nvcc": ["-O3", "--use_fast_math"]}, 49 ) 50 ] 51 ) 52 53 cmdclass["build_ext"] = BuildExtension 54 55 56 if __name__ == "__main__": 57 setuptools.setup( 58 name="fairscale", 59 description="FairScale: A PyTorch library for large-scale and high-performance training.", 60 version=find_version("fairscale/version.py"), 61 setup_requires=["ninja"], # ninja is required to build extensions 62 install_requires=fetch_requirements(), 63 include_package_data=True, 64 packages=setuptools.find_packages(exclude=("tests", "tests.*")), 65 ext_modules=extensions, 66 cmdclass=cmdclass, 67 python_requires=">=3.6", 68 author="Facebook AI Research", 69 author_email="todo@fb.com", 70 long_description="FairScale is a PyTorch extension library for high performance and large scale training on one or multiple machines/nodes. 
This library extends basic PyTorch capabilities while adding new experimental ones.", 71 long_description_content_type="text/markdown", 72 classifiers=[ 73 "Programming Language :: Python :: 3.7", 74 "Programming Language :: Python :: 3.8", 75 "Programming Language :: Python :: 3.9", 76 "License :: OSI Approved :: BSD License", 77 "Topic :: Scientific/Engineering :: Artificial Intelligence", 78 "Operating System :: OS Independent", 79 ], 80 ) 81 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -64,7 +64,7 @@ packages=setuptools.find_packages(exclude=("tests", "tests.*")), ext_modules=extensions, cmdclass=cmdclass, - python_requires=">=3.6", + python_requires=">=3.7", author="Facebook AI Research", author_email="todo@fb.com", long_description="FairScale is a PyTorch extension library for high performance and large scale training on one or multiple machines/nodes. This library extends basic PyTorch capabilities while adding new experimental ones.",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -64,7 +64,7 @@\n packages=setuptools.find_packages(exclude=(\"tests\", \"tests.*\")),\n ext_modules=extensions,\n cmdclass=cmdclass,\n- python_requires=\">=3.6\",\n+ python_requires=\">=3.7\",\n author=\"Facebook AI Research\",\n author_email=\"todo@fb.com\",\n long_description=\"FairScale is a PyTorch extension library for high performance and large scale training on one or multiple machines/nodes. This library extends basic PyTorch capabilities while adding new experimental ones.\",\n", "issue": "the main branch is not compatible with python 3.6, but setup.py only requires \">=3.6\"\npython 3.6 can pip install latest fairscale\r\nhttps://github.com/facebookresearch/fairscale/blob/1bc96fa8c69def6d990e42bfbd75f86146ce29bd/setup.py#L67\r\n\r\nbut, some code is not compatible with python 3.6\r\nhttps://github.com/facebookresearch/fairscale/blob/1bc96fa8c69def6d990e42bfbd75f86146ce29bd/fairscale/experimental/nn/ssd_offload.py#L6\r\nand python<3.7 has no dataclasses\r\nhttps://github.com/facebookresearch/fairscale/blob/1bc96fa8c69def6d990e42bfbd75f86146ce29bd/fairscale/nn/data_parallel/fully_sharded_data_parallel.py#L8\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport re\n\nimport setuptools\n\nthis_dir = os.path.dirname(os.path.abspath(__file__))\n\n\ndef fetch_requirements():\n with open(\"requirements.txt\") as f:\n reqs = f.read().strip().split(\"\\n\")\n return reqs\n\n\n# https://packaging.python.org/guides/single-sourcing-package-version/\ndef find_version(version_file_path) -> str:\n with open(version_file_path) as version_file:\n version_match = re.search(r\"^__version_tuple__ = (.*)\", version_file.read(), re.M)\n if version_match:\n ver_tup = eval(version_match.group(1))\n ver_str = \".\".join([str(x) for x in ver_tup])\n return ver_str\n raise RuntimeError(\"Unable to find version tuple.\")\n\n\nextensions = []\ncmdclass = {}\n\nif os.getenv(\"BUILD_CUDA_EXTENSIONS\", \"0\") == \"1\":\n from torch.utils.cpp_extension import BuildExtension, CUDAExtension\n\n extensions.extend(\n [\n CUDAExtension(\n name=\"fairscale.fused_adam_cuda\",\n include_dirs=[os.path.join(this_dir, \"fairscale/clib/fused_adam_cuda\")],\n sources=[\n \"fairscale/clib/fused_adam_cuda/fused_adam_cuda.cpp\",\n \"fairscale/clib/fused_adam_cuda/fused_adam_cuda_kernel.cu\",\n ],\n extra_compile_args={\"cxx\": [\"-O3\"], \"nvcc\": [\"-O3\", \"--use_fast_math\"]},\n )\n ]\n )\n\n cmdclass[\"build_ext\"] = BuildExtension\n\n\nif __name__ == \"__main__\":\n setuptools.setup(\n name=\"fairscale\",\n description=\"FairScale: A PyTorch library for large-scale and high-performance training.\",\n version=find_version(\"fairscale/version.py\"),\n setup_requires=[\"ninja\"], # ninja is required to build extensions\n install_requires=fetch_requirements(),\n include_package_data=True,\n packages=setuptools.find_packages(exclude=(\"tests\", \"tests.*\")),\n ext_modules=extensions,\n cmdclass=cmdclass,\n python_requires=\">=3.6\",\n author=\"Facebook AI Research\",\n author_email=\"todo@fb.com\",\n long_description=\"FairScale is a PyTorch extension library for high performance and large scale training on one or multiple machines/nodes. 
This library extends basic PyTorch capabilities while adding new experimental ones.\",\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Operating System :: OS Independent\",\n ],\n )\n", "path": "setup.py"}]}
1,570
138
gh_patches_debug_38255
rasdani/github-patches
git_diff
hydroshare__hydroshare-5302
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Enable management command to fix file issues on published resource Currently, running `repair_resource` on published resources will fail. Reproduce by: * Create a resource and add some files * Use the database or shell to remove some of the files from django to put the resource in a "broken" state * Publish the resource * Run the repair_resource --published management command and see that the published resource cannot be fixed by this script **Additional context** HS v2.11.3 </issue> <code> [start of hs_core/management/commands/repair_resource.py] 1 # -*- coding: utf-8 -*- 2 3 """ 4 Check synchronization between iRODS and Django for multiple resources 5 6 This checks that: 7 8 1. every ResourceFile corresponds to an iRODS file 9 2. every iRODS file in {short_id}/data/contents corresponds to a ResourceFile 10 3. every iRODS directory {short_id} corresponds to a Django resource 11 """ 12 13 from django.core.management.base import BaseCommand, CommandError 14 from hs_core.models import BaseResource 15 from hs_core.management.utils import repair_resource 16 from hs_core.views.utils import get_default_admin_user 17 from hs_core import hydroshare 18 from django.utils import timezone 19 from django.db.models import F 20 from datetime import timedelta 21 22 import logging 23 24 25 class Command(BaseCommand): 26 help = "Check synchronization between iRODS and Django." 27 28 def add_arguments(self, parser): 29 parser.add_argument('resource_ids', nargs='*', type=str) 30 parser.add_argument('--days', type=int, dest='days', help='include resources updated in the last X days') 31 parser.add_argument( 32 '--admin', 33 action='store_true', # True for presence, False for absence 34 dest='admin', # value is options['dry_run'] 35 help='run process as admin user - this allows published resources to be modified', 36 ) 37 parser.add_argument( 38 '--dryrun', 39 action='store_true', # True for presence, False for absence 40 dest='dry_run', # value is options['dry_run'] 41 help='run process without saving changes', 42 ) 43 parser.add_argument( 44 '--published', 45 action='store_true', # True for presence, False for absence 46 dest='published', # value is options['published'] 47 help='filter to just published resources', 48 ) 49 50 def handle(self, *args, **options): 51 logger = logging.getLogger(__name__) 52 resources_ids = options['resource_ids'] 53 resources = BaseResource.objects.all() 54 days = options['days'] 55 admin = options['admin'] 56 dry_run = options['dry_run'] 57 published = options['published'] 58 site_url = hydroshare.utils.current_site_url() 59 60 if resources_ids: # an array of resource short_id to check. 61 print("CHECKING RESOURCES PROVIDED") 62 resources = resources.filter(short_id__in=resources_ids) 63 if published: 64 if not dry_run: 65 print("WARNING: Executing with --published arg without --dryrun. Published resources will be modified.") 66 print("FILTERING TO INCLUDE PUBLISHED RESOURCES ONLY") 67 resources = resources.filter(raccess__published=True) 68 69 if days: 70 print(f"FILTERING TO INCLUDE RESOURCES UPDATED IN LAST {days} DAYS") 71 if resources_ids: 72 print("Your supplied resource_ids will be filtered by the --days that you provided. 
") 73 cuttoff_time = timezone.now() - timedelta(days) 74 resources = resources.filter(updated__gte=cuttoff_time) 75 76 if dry_run: 77 print("CONDUCTING A DRY RUN: FIXES WILL NOT BE SAVED") 78 79 if not resources: 80 print("NO RESOURCES FOUND MATCHING YOUR FILTER ARGUMENTS") 81 return 82 83 if admin: 84 print("PROCESSES WILL BE RUN AS ADMIN USER. ALLOWS DELETING DJANGO RESOURCE FILES ON PUBLISHED RESOURCES") 85 user = get_default_admin_user() 86 else: 87 user = None 88 89 resources = resources.order_by(F('updated').asc(nulls_first=True)) 90 91 total_res_to_check = resources.count() 92 current_resource = 0 93 impacted_resources = 0 94 total_files_missing_in_django = 0 95 total_files_dangling_in_django = 0 96 resources_with_missing_django = [] 97 resources_with_missing_irods = [] 98 for resource in resources.iterator(): 99 current_resource += 1 100 res_url = site_url + resource.absolute_url 101 print("*" * 100) 102 print(f"{current_resource}/{total_res_to_check}: Checking resource {res_url}") 103 if resource.raccess.published: 104 print("This Resource is published") 105 if admin: 106 print("Command running with --admin. Published resources will be repaired if needed.") 107 else: 108 print("Command running without --admin. Fixing a published resource raise ValidationError") 109 _, missing_in_django, dangling_in_django = repair_resource(resource, logger, dry_run=dry_run, user=user) 110 if dangling_in_django > 0 or missing_in_django > 0: 111 impacted_resources += 1 112 total_files_missing_in_django += missing_in_django 113 total_files_dangling_in_django += dangling_in_django 114 if missing_in_django > 0: 115 resources_with_missing_django.append(res_url) 116 if dangling_in_django > 0: 117 resources_with_missing_irods.append(res_url) 118 print(f"{dangling_in_django} files dangling in Django for this resource.") 119 print(f"{missing_in_django} files missing in Django for this resource.") 120 print(f"Resources thus far with at least one missing django file: {len(resources_with_missing_django)}") 121 print(f"Resources thus far with at least one dangling django file: {len(resources_with_missing_irods)}") 122 print(f"Total resources with discrepancies thus far: {impacted_resources}") 123 print("*" * 100) 124 print("*" * 100) 125 print(f"Number of resources that had at least one file issue: {impacted_resources}") 126 127 print("*" * 100) 128 print(f"Total number of files missing in Django (across all checked resources): \ 129 {total_files_missing_in_django}") 130 print(f"Number of resources with at least one missing django file: {len(resources_with_missing_django)}") 131 for res in resources_with_missing_django: 132 print(res) 133 134 print("*" * 100) 135 print(f"Total number of files dangling in Django (across all checked resources): \ 136 {total_files_dangling_in_django}") 137 print(f"Number of resources with at least one dangling Django file: {len(resources_with_missing_irods)}") 138 for res in resources_with_missing_irods: 139 print(res) 140 141 # Make it simple to detect clean/fail run in Jenkins 142 if impacted_resources: 143 raise CommandError("repair_resources detected problems") 144 else: 145 print("Completed run without detecting issues") 146 [end of hs_core/management/commands/repair_resource.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/hs_core/management/commands/repair_resource.py b/hs_core/management/commands/repair_resource.py --- a/hs_core/management/commands/repair_resource.py +++ b/hs_core/management/commands/repair_resource.py @@ -11,6 +11,7 @@ """ from django.core.management.base import BaseCommand, CommandError +from django.core.exceptions import ValidationError from hs_core.models import BaseResource from hs_core.management.utils import repair_resource from hs_core.views.utils import get_default_admin_user @@ -95,6 +96,7 @@ total_files_dangling_in_django = 0 resources_with_missing_django = [] resources_with_missing_irods = [] + failed_resources = [] for resource in resources.iterator(): current_resource += 1 res_url = site_url + resource.absolute_url @@ -106,7 +108,13 @@ print("Command running with --admin. Published resources will be repaired if needed.") else: print("Command running without --admin. Fixing a published resource raise ValidationError") - _, missing_in_django, dangling_in_django = repair_resource(resource, logger, dry_run=dry_run, user=user) + try: + _, missing_in_django, dangling_in_django = repair_resource(resource, logger, dry_run=dry_run, user=user) + except ValidationError as ve: + failed_resources.append(res_url) + print("Exception while attempting to repair resource:") + print(ve) + continue if dangling_in_django > 0 or missing_in_django > 0: impacted_resources += 1 total_files_missing_in_django += missing_in_django @@ -139,7 +147,13 @@ print(res) # Make it simple to detect clean/fail run in Jenkins - if impacted_resources: - raise CommandError("repair_resources detected problems") + if impacted_resources and dry_run: + raise CommandError("repair_resources detected resources in need of repair during dry run") else: - print("Completed run without detecting issues") + print("Completed run of repair_resource") + if failed_resources: + print("*" * 100) + print("Repair was attempted but failed for the following resources:") + for res in resources_with_missing_irods: + print(res) + raise CommandError("Repair was attempted but failed on at least one resource")
{"golden_diff": "diff --git a/hs_core/management/commands/repair_resource.py b/hs_core/management/commands/repair_resource.py\n--- a/hs_core/management/commands/repair_resource.py\n+++ b/hs_core/management/commands/repair_resource.py\n@@ -11,6 +11,7 @@\n \"\"\"\n \n from django.core.management.base import BaseCommand, CommandError\n+from django.core.exceptions import ValidationError\n from hs_core.models import BaseResource\n from hs_core.management.utils import repair_resource\n from hs_core.views.utils import get_default_admin_user\n@@ -95,6 +96,7 @@\n total_files_dangling_in_django = 0\n resources_with_missing_django = []\n resources_with_missing_irods = []\n+ failed_resources = []\n for resource in resources.iterator():\n current_resource += 1\n res_url = site_url + resource.absolute_url\n@@ -106,7 +108,13 @@\n print(\"Command running with --admin. Published resources will be repaired if needed.\")\n else:\n print(\"Command running without --admin. Fixing a published resource raise ValidationError\")\n- _, missing_in_django, dangling_in_django = repair_resource(resource, logger, dry_run=dry_run, user=user)\n+ try:\n+ _, missing_in_django, dangling_in_django = repair_resource(resource, logger, dry_run=dry_run, user=user)\n+ except ValidationError as ve:\n+ failed_resources.append(res_url)\n+ print(\"Exception while attempting to repair resource:\")\n+ print(ve)\n+ continue\n if dangling_in_django > 0 or missing_in_django > 0:\n impacted_resources += 1\n total_files_missing_in_django += missing_in_django\n@@ -139,7 +147,13 @@\n print(res)\n \n # Make it simple to detect clean/fail run in Jenkins\n- if impacted_resources:\n- raise CommandError(\"repair_resources detected problems\")\n+ if impacted_resources and dry_run:\n+ raise CommandError(\"repair_resources detected resources in need of repair during dry run\")\n else:\n- print(\"Completed run without detecting issues\")\n+ print(\"Completed run of repair_resource\")\n+ if failed_resources:\n+ print(\"*\" * 100)\n+ print(\"Repair was attempted but failed for the following resources:\")\n+ for res in resources_with_missing_irods:\n+ print(res)\n+ raise CommandError(\"Repair was attempted but failed on at least one resource\")\n", "issue": "Enable management command to fix file issues on published resource\nCurrently, running `repair_resource` on published resources will fail.\r\n\r\nReproduce by:\r\n* Create a resource and add some files\r\n* Use the database or shell to remove some of the files from django to put the resource in a \"broken\" state\r\n* Publish the resource\r\n* Run the repair_resource --published management command and see that the published resource cannot be fixed by this script\r\n\r\n**Additional context**\r\nHS v2.11.3\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nCheck synchronization between iRODS and Django for multiple resources\n\nThis checks that:\n\n1. every ResourceFile corresponds to an iRODS file\n2. every iRODS file in {short_id}/data/contents corresponds to a ResourceFile\n3. 
every iRODS directory {short_id} corresponds to a Django resource\n\"\"\"\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom hs_core.models import BaseResource\nfrom hs_core.management.utils import repair_resource\nfrom hs_core.views.utils import get_default_admin_user\nfrom hs_core import hydroshare\nfrom django.utils import timezone\nfrom django.db.models import F\nfrom datetime import timedelta\n\nimport logging\n\n\nclass Command(BaseCommand):\n help = \"Check synchronization between iRODS and Django.\"\n\n def add_arguments(self, parser):\n parser.add_argument('resource_ids', nargs='*', type=str)\n parser.add_argument('--days', type=int, dest='days', help='include resources updated in the last X days')\n parser.add_argument(\n '--admin',\n action='store_true', # True for presence, False for absence\n dest='admin', # value is options['dry_run']\n help='run process as admin user - this allows published resources to be modified',\n )\n parser.add_argument(\n '--dryrun',\n action='store_true', # True for presence, False for absence\n dest='dry_run', # value is options['dry_run']\n help='run process without saving changes',\n )\n parser.add_argument(\n '--published',\n action='store_true', # True for presence, False for absence\n dest='published', # value is options['published']\n help='filter to just published resources',\n )\n\n def handle(self, *args, **options):\n logger = logging.getLogger(__name__)\n resources_ids = options['resource_ids']\n resources = BaseResource.objects.all()\n days = options['days']\n admin = options['admin']\n dry_run = options['dry_run']\n published = options['published']\n site_url = hydroshare.utils.current_site_url()\n\n if resources_ids: # an array of resource short_id to check.\n print(\"CHECKING RESOURCES PROVIDED\")\n resources = resources.filter(short_id__in=resources_ids)\n if published:\n if not dry_run:\n print(\"WARNING: Executing with --published arg without --dryrun. Published resources will be modified.\")\n print(\"FILTERING TO INCLUDE PUBLISHED RESOURCES ONLY\")\n resources = resources.filter(raccess__published=True)\n\n if days:\n print(f\"FILTERING TO INCLUDE RESOURCES UPDATED IN LAST {days} DAYS\")\n if resources_ids:\n print(\"Your supplied resource_ids will be filtered by the --days that you provided. \")\n cuttoff_time = timezone.now() - timedelta(days)\n resources = resources.filter(updated__gte=cuttoff_time)\n\n if dry_run:\n print(\"CONDUCTING A DRY RUN: FIXES WILL NOT BE SAVED\")\n\n if not resources:\n print(\"NO RESOURCES FOUND MATCHING YOUR FILTER ARGUMENTS\")\n return\n\n if admin:\n print(\"PROCESSES WILL BE RUN AS ADMIN USER. ALLOWS DELETING DJANGO RESOURCE FILES ON PUBLISHED RESOURCES\")\n user = get_default_admin_user()\n else:\n user = None\n\n resources = resources.order_by(F('updated').asc(nulls_first=True))\n\n total_res_to_check = resources.count()\n current_resource = 0\n impacted_resources = 0\n total_files_missing_in_django = 0\n total_files_dangling_in_django = 0\n resources_with_missing_django = []\n resources_with_missing_irods = []\n for resource in resources.iterator():\n current_resource += 1\n res_url = site_url + resource.absolute_url\n print(\"*\" * 100)\n print(f\"{current_resource}/{total_res_to_check}: Checking resource {res_url}\")\n if resource.raccess.published:\n print(\"This Resource is published\")\n if admin:\n print(\"Command running with --admin. Published resources will be repaired if needed.\")\n else:\n print(\"Command running without --admin. 
Fixing a published resource raise ValidationError\")\n _, missing_in_django, dangling_in_django = repair_resource(resource, logger, dry_run=dry_run, user=user)\n if dangling_in_django > 0 or missing_in_django > 0:\n impacted_resources += 1\n total_files_missing_in_django += missing_in_django\n total_files_dangling_in_django += dangling_in_django\n if missing_in_django > 0:\n resources_with_missing_django.append(res_url)\n if dangling_in_django > 0:\n resources_with_missing_irods.append(res_url)\n print(f\"{dangling_in_django} files dangling in Django for this resource.\")\n print(f\"{missing_in_django} files missing in Django for this resource.\")\n print(f\"Resources thus far with at least one missing django file: {len(resources_with_missing_django)}\")\n print(f\"Resources thus far with at least one dangling django file: {len(resources_with_missing_irods)}\")\n print(f\"Total resources with discrepancies thus far: {impacted_resources}\")\n print(\"*\" * 100)\n print(\"*\" * 100)\n print(f\"Number of resources that had at least one file issue: {impacted_resources}\")\n\n print(\"*\" * 100)\n print(f\"Total number of files missing in Django (across all checked resources): \\\n {total_files_missing_in_django}\")\n print(f\"Number of resources with at least one missing django file: {len(resources_with_missing_django)}\")\n for res in resources_with_missing_django:\n print(res)\n\n print(\"*\" * 100)\n print(f\"Total number of files dangling in Django (across all checked resources): \\\n {total_files_dangling_in_django}\")\n print(f\"Number of resources with at least one dangling Django file: {len(resources_with_missing_irods)}\")\n for res in resources_with_missing_irods:\n print(res)\n\n # Make it simple to detect clean/fail run in Jenkins\n if impacted_resources:\n raise CommandError(\"repair_resources detected problems\")\n else:\n print(\"Completed run without detecting issues\")\n", "path": "hs_core/management/commands/repair_resource.py"}]}
2,354
542
gh_patches_debug_16300
rasdani/github-patches
git_diff
pre-commit__pre-commit-399
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Drop python2.6? Is it worth attempting to continue to support python2.6? </issue> <code> [start of setup.py] 1 from setuptools import find_packages 2 from setuptools import setup 3 4 5 setup( 6 name='pre_commit', 7 description=( 8 'A framework for managing and maintaining multi-language pre-commit ' 9 'hooks.' 10 ), 11 url='https://github.com/pre-commit/pre-commit', 12 version='0.8.2', 13 14 author='Anthony Sottile', 15 author_email='asottile@umich.edu', 16 17 platforms='linux', 18 classifiers=[ 19 'License :: OSI Approved :: MIT License', 20 'Programming Language :: Python :: 2', 21 'Programming Language :: Python :: 2.6', 22 'Programming Language :: Python :: 2.7', 23 'Programming Language :: Python :: 3', 24 'Programming Language :: Python :: 3.4', 25 'Programming Language :: Python :: 3.5', 26 'Programming Language :: Python :: Implementation :: CPython', 27 'Programming Language :: Python :: Implementation :: PyPy', 28 ], 29 30 packages=find_packages('.', exclude=('tests*', 'testing*')), 31 package_data={ 32 'pre_commit': [ 33 'resources/hook-tmpl', 34 'resources/pre-push-tmpl', 35 'resources/rbenv.tar.gz', 36 'resources/ruby-build.tar.gz', 37 'resources/ruby-download.tar.gz', 38 ] 39 }, 40 install_requires=[ 41 'aspy.yaml', 42 'cached-property', 43 'jsonschema', 44 'nodeenv>=0.11.1', 45 'pyterminalsize', 46 'pyyaml', 47 'virtualenv', 48 ], 49 extras_require={ 50 ':python_version=="2.6"': ['argparse', 'ordereddict'], 51 }, 52 entry_points={ 53 'console_scripts': [ 54 'pre-commit = pre_commit.main:main', 55 'pre-commit-validate-config = pre_commit.clientlib.validate_config:run', # noqa 56 'pre-commit-validate-manifest = pre_commit.clientlib.validate_manifest:run', # noqa 57 ], 58 }, 59 ) 60 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -18,7 +18,6 @@ classifiers=[ 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', @@ -46,9 +45,6 @@ 'pyyaml', 'virtualenv', ], - extras_require={ - ':python_version=="2.6"': ['argparse', 'ordereddict'], - }, entry_points={ 'console_scripts': [ 'pre-commit = pre_commit.main:main',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,7 +18,6 @@\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n- 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n@@ -46,9 +45,6 @@\n 'pyyaml',\n 'virtualenv',\n ],\n- extras_require={\n- ':python_version==\"2.6\"': ['argparse', 'ordereddict'],\n- },\n entry_points={\n 'console_scripts': [\n 'pre-commit = pre_commit.main:main',\n", "issue": "Drop python2.6?\nIs it worth attempting to continue to support python2.6?\n\n", "before_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n\nsetup(\n name='pre_commit',\n description=(\n 'A framework for managing and maintaining multi-language pre-commit '\n 'hooks.'\n ),\n url='https://github.com/pre-commit/pre-commit',\n version='0.8.2',\n\n author='Anthony Sottile',\n author_email='asottile@umich.edu',\n\n platforms='linux',\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n\n packages=find_packages('.', exclude=('tests*', 'testing*')),\n package_data={\n 'pre_commit': [\n 'resources/hook-tmpl',\n 'resources/pre-push-tmpl',\n 'resources/rbenv.tar.gz',\n 'resources/ruby-build.tar.gz',\n 'resources/ruby-download.tar.gz',\n ]\n },\n install_requires=[\n 'aspy.yaml',\n 'cached-property',\n 'jsonschema',\n 'nodeenv>=0.11.1',\n 'pyterminalsize',\n 'pyyaml',\n 'virtualenv',\n ],\n extras_require={\n ':python_version==\"2.6\"': ['argparse', 'ordereddict'],\n },\n entry_points={\n 'console_scripts': [\n 'pre-commit = pre_commit.main:main',\n 'pre-commit-validate-config = pre_commit.clientlib.validate_config:run', # noqa\n 'pre-commit-validate-manifest = pre_commit.clientlib.validate_manifest:run', # noqa\n ],\n },\n)\n", "path": "setup.py"}]}
1,069
174
gh_patches_debug_21753
rasdani/github-patches
git_diff
Flexget__Flexget-1600
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> nyaa changed TLD hi peeps. it seems they switched TLD from .eu to .se i changed my local flexget/plugins/sites/nyaa.py, removed the pyc & reloaded the daemon. its pulling stuff. but i aint got the skills to send a pull request, so i thought i'd do the next best thing and say something if you don't want to do anything, i guess thats fine too. the old is redirecting to the new </issue> <code> [start of flexget/plugins/sites/nyaa.py] 1 from __future__ import unicode_literals, division, absolute_import 2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin 3 from future.moves.urllib.parse import quote 4 5 import logging 6 7 import feedparser 8 9 from flexget import plugin 10 from flexget.entry import Entry 11 from flexget.event import event 12 from flexget.utils.search import normalize_unicode 13 14 log = logging.getLogger('nyaa') 15 16 # TODO: Other categories 17 CATEGORIES = {'all': '0_0', 18 'anime': '1_0', 19 'anime eng': '1_37', 20 'anime non-eng': '1_38', 21 'anime raw': '1_11'} 22 FILTERS = ['all', 'filter remakes', 'trusted only', 'a+ only'] 23 24 25 class UrlRewriteNyaa(object): 26 """Nyaa urlrewriter and search plugin.""" 27 28 schema = { 29 'oneOf': [ 30 {'type': 'string', 'enum': list(CATEGORIES)}, 31 { 32 'type': 'object', 33 'properties': { 34 'category': {'type': 'string', 'enum': list(CATEGORIES)}, 35 'filter': {'type': 'string', 'enum': list(FILTERS)} 36 }, 37 'additionalProperties': False 38 } 39 ] 40 } 41 42 def search(self, task, entry, config): 43 if not isinstance(config, dict): 44 config = {'category': config} 45 config.setdefault('category', 'anime eng') 46 config.setdefault('filter', 'all') 47 entries = set() 48 for search_string in entry.get('search_strings', [entry['title']]): 49 name = normalize_unicode(search_string) 50 url = 'http://www.nyaa.eu/?page=rss&cats=%s&filter=%s&term=%s' % ( 51 CATEGORIES[config['category']], FILTERS.index(config['filter']), quote(name.encode('utf-8'))) 52 53 log.debug('requesting: %s' % url) 54 rss = feedparser.parse(url) 55 56 status = rss.get('status', False) 57 if status != 200: 58 log.debug('Search result not 200 (OK), received %s' % status) 59 if status >= 400: 60 continue 61 62 ex = rss.get('bozo_exception', False) 63 if ex: 64 log.error('Got bozo_exception (bad feed) on %s' % url) 65 continue 66 67 for item in rss.entries: 68 entry = Entry() 69 entry['title'] = item.title 70 entry['url'] = item.link 71 # TODO: parse some shit 72 # entry['torrent_seeds'] = int(item.seeds) 73 # entry['torrent_leeches'] = int(item.leechs) 74 # entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches']) 75 # entry['content_size'] = int(item.size) / 1024 / 1024 76 77 entries.add(entry) 78 79 return entries 80 81 def url_rewritable(self, task, entry): 82 return entry['url'].startswith('http://www.nyaa.eu/?page=torrentinfo&tid=') 83 84 def url_rewrite(self, task, entry): 85 entry['url'] = entry['url'].replace('torrentinfo', 'download') 86 87 88 @event('plugin.register') 89 def register_plugin(): 90 plugin.register(UrlRewriteNyaa, 'nyaa', groups=['search', 'urlrewriter'], api_ver=2) 91 [end of flexget/plugins/sites/nyaa.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/flexget/plugins/sites/nyaa.py b/flexget/plugins/sites/nyaa.py --- a/flexget/plugins/sites/nyaa.py +++ b/flexget/plugins/sites/nyaa.py @@ -47,7 +47,7 @@ entries = set() for search_string in entry.get('search_strings', [entry['title']]): name = normalize_unicode(search_string) - url = 'http://www.nyaa.eu/?page=rss&cats=%s&filter=%s&term=%s' % ( + url = 'http://www.nyaa.se/?page=rss&cats=%s&filter=%s&term=%s' % ( CATEGORIES[config['category']], FILTERS.index(config['filter']), quote(name.encode('utf-8'))) log.debug('requesting: %s' % url) @@ -79,7 +79,7 @@ return entries def url_rewritable(self, task, entry): - return entry['url'].startswith('http://www.nyaa.eu/?page=torrentinfo&tid=') + return entry['url'].startswith('http://www.nyaa.se/?page=torrentinfo&tid=') def url_rewrite(self, task, entry): entry['url'] = entry['url'].replace('torrentinfo', 'download')
{"golden_diff": "diff --git a/flexget/plugins/sites/nyaa.py b/flexget/plugins/sites/nyaa.py\n--- a/flexget/plugins/sites/nyaa.py\n+++ b/flexget/plugins/sites/nyaa.py\n@@ -47,7 +47,7 @@\n entries = set()\n for search_string in entry.get('search_strings', [entry['title']]):\n name = normalize_unicode(search_string)\n- url = 'http://www.nyaa.eu/?page=rss&cats=%s&filter=%s&term=%s' % (\n+ url = 'http://www.nyaa.se/?page=rss&cats=%s&filter=%s&term=%s' % (\n CATEGORIES[config['category']], FILTERS.index(config['filter']), quote(name.encode('utf-8')))\n \n log.debug('requesting: %s' % url)\n@@ -79,7 +79,7 @@\n return entries\n \n def url_rewritable(self, task, entry):\n- return entry['url'].startswith('http://www.nyaa.eu/?page=torrentinfo&tid=')\n+ return entry['url'].startswith('http://www.nyaa.se/?page=torrentinfo&tid=')\n \n def url_rewrite(self, task, entry):\n entry['url'] = entry['url'].replace('torrentinfo', 'download')\n", "issue": "nyaa changed TLD\nhi peeps. it seems they switched TLD from .eu to .se\r\n\r\ni changed my local flexget/plugins/sites/nyaa.py, removed the pyc & reloaded the daemon. its pulling stuff. but i aint got the skills to send a pull request, so i thought i'd do the next best thing and say something\r\n\r\nif you don't want to do anything, i guess thats fine too. the old is redirecting to the new\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\nfrom future.moves.urllib.parse import quote\n\nimport logging\n\nimport feedparser\n\nfrom flexget import plugin\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.utils.search import normalize_unicode\n\nlog = logging.getLogger('nyaa')\n\n# TODO: Other categories\nCATEGORIES = {'all': '0_0',\n 'anime': '1_0',\n 'anime eng': '1_37',\n 'anime non-eng': '1_38',\n 'anime raw': '1_11'}\nFILTERS = ['all', 'filter remakes', 'trusted only', 'a+ only']\n\n\nclass UrlRewriteNyaa(object):\n \"\"\"Nyaa urlrewriter and search plugin.\"\"\"\n\n schema = {\n 'oneOf': [\n {'type': 'string', 'enum': list(CATEGORIES)},\n {\n 'type': 'object',\n 'properties': {\n 'category': {'type': 'string', 'enum': list(CATEGORIES)},\n 'filter': {'type': 'string', 'enum': list(FILTERS)}\n },\n 'additionalProperties': False\n }\n ]\n }\n\n def search(self, task, entry, config):\n if not isinstance(config, dict):\n config = {'category': config}\n config.setdefault('category', 'anime eng')\n config.setdefault('filter', 'all')\n entries = set()\n for search_string in entry.get('search_strings', [entry['title']]):\n name = normalize_unicode(search_string)\n url = 'http://www.nyaa.eu/?page=rss&cats=%s&filter=%s&term=%s' % (\n CATEGORIES[config['category']], FILTERS.index(config['filter']), quote(name.encode('utf-8')))\n\n log.debug('requesting: %s' % url)\n rss = feedparser.parse(url)\n\n status = rss.get('status', False)\n if status != 200:\n log.debug('Search result not 200 (OK), received %s' % status)\n if status >= 400:\n continue\n\n ex = rss.get('bozo_exception', False)\n if ex:\n log.error('Got bozo_exception (bad feed) on %s' % url)\n continue\n\n for item in rss.entries:\n entry = Entry()\n entry['title'] = item.title\n entry['url'] = item.link\n # TODO: parse some shit\n # entry['torrent_seeds'] = int(item.seeds)\n # entry['torrent_leeches'] = int(item.leechs)\n # entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])\n # entry['content_size'] = 
int(item.size) / 1024 / 1024\n\n entries.add(entry)\n\n return entries\n\n def url_rewritable(self, task, entry):\n return entry['url'].startswith('http://www.nyaa.eu/?page=torrentinfo&tid=')\n\n def url_rewrite(self, task, entry):\n entry['url'] = entry['url'].replace('torrentinfo', 'download')\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(UrlRewriteNyaa, 'nyaa', groups=['search', 'urlrewriter'], api_ver=2)\n", "path": "flexget/plugins/sites/nyaa.py"}]}
1,573
292
gh_patches_debug_25182
rasdani/github-patches
git_diff
mars-project__mars-1155
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Execute groupby failed <!-- Thank you for your contribution! Please review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue. --> **Describe the bug** `df.groupby().execute()` may fail. **To Reproduce** To help us reproducing this bug, please provide information below: 1. Your Python version 2. The version of Mars you use 3. Versions of crucial packages, such as numpy, scipy and protobuf 4. Full stack of the error. 5. Minimized code to reproduce the error. ``` In [1]: import pandas as pd; import numpy as np In [2]: df = pd.DataFrame(np.random.rand(4, 3), index=np.arange(5, 1, -1)) In [4]: import mars.dataframe as md In [5]: mdf = md.DataFrame(df) In [6]: mdf.groupby(0).execute() --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-6-491b51043e08> in <module> ----> 1 mdf.groupby(0).execute() ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 426 if session is None: 427 session = Session.default_or_local() --> 428 return session.run(self, **kw) 429 430 def fetch(self, session=None, **kw): ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 181 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 182 for t in tileables) --> 183 result = self._sess.run(*tileables, **kw) 184 185 for t in tileables: ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 88 # set number of running cores 89 self.context.set_ncores(kw['n_parallel']) ---> 90 res = self._executor.execute_tileables(tileables, **kw) 91 return res 92 ~/Workspace/mars/mars/utils.py in _wrapped(*args, **kwargs) 380 _kernel_mode.eager = False 381 _kernel_mode.eager_count = enter_eager_count + 1 --> 382 return func(*args, **kwargs) 383 finally: 384 _kernel_mode.eager_count -= 1 ~/Workspace/mars/mars/utils.py in inner(*args, **kwargs) 468 def inner(*args, **kwargs): 469 with build_mode(): --> 470 return func(*args, **kwargs) 471 return inner 472 ~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose) 828 # update shape of tileable and its chunks whatever it's successful or not 829 self._update_tileable_and_chunk_shape( --> 830 tileable_graph, chunk_result, chunk_graph_builder.interrupted_ops) 831 if chunk_graph_builder.done: 832 if len(intermediate_result_keys) > 0: ~/Workspace/mars/mars/executor.py in _update_tileable_and_chunk_shape(self, tileable_graph, chunk_result, failed_ops) 726 continue 727 for c in tiled_n.chunks: --> 728 c.data._shape = chunk_result[c.key].shape 729 new_nsplits = self.get_tileable_nsplits(n, chunk_result=chunk_result) 730 for node in (n, tiled_n): ~/Workspace/mars/mars/lib/groupby_wrapper.py in __getattr__(self, item) 74 if item in getattr(self.obj, 'columns', ()): 75 return self.__getitem__(item) ---> 76 return getattr(self.groupby_obj, item) 77 78 def __iter__(self): ~/miniconda3/lib/python3.7/site-packages/pandas/core/groupby/groupby.py in __getattr__(self, attr) 578 579 raise AttributeError( --> 580 f"'{type(self).__name__}' object has no attribute '{attr}'" 581 ) 582 AttributeError: 'DataFrameGroupBy' object has no attribute 'shape' ``` </issue> <code> [start of mars/lib/groupby_wrapper.py] 1 # Copyright 1999-2020 Alibaba Group Holding Ltd. 
2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from collections.abc import Iterable 16 17 import cloudpickle 18 from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy 19 20 21 class GroupByWrapper: 22 def __init__(self, obj, groupby_obj=None, keys=None, axis=0, level=None, grouper=None, 23 exclusions=None, selection=None, as_index=True, sort=True, 24 group_keys=True, squeeze=False, observed=False, mutated=False, 25 grouper_cache=None): 26 27 def fill_value(v, key): 28 return v if v is not None or groupby_obj is None else getattr(groupby_obj, key) 29 30 self.obj = obj 31 self.keys = fill_value(keys, 'keys') 32 self.axis = fill_value(axis, 'axis') 33 self.level = fill_value(level, 'level') 34 self.exclusions = fill_value(exclusions, 'exclusions') 35 self.selection = selection 36 self.as_index = fill_value(as_index, 'as_index') 37 self.sort = fill_value(sort, 'sort') 38 self.group_keys = fill_value(group_keys, 'group_keys') 39 self.squeeze = fill_value(squeeze, 'squeeze') 40 self.observed = fill_value(observed, 'observed') 41 self.mutated = fill_value(mutated, 'mutated') 42 43 if groupby_obj is None: 44 if obj.ndim == 2: 45 self.groupby_obj = DataFrameGroupBy( 46 obj, keys=keys, axis=axis, level=level, grouper=grouper, exclusions=exclusions, 47 as_index=as_index, group_keys=group_keys, squeeze=squeeze, observed=observed, 48 mutated=mutated) 49 else: 50 self.groupby_obj = SeriesGroupBy( 51 obj, keys=keys, axis=axis, level=level, grouper=grouper, exclusions=exclusions, 52 as_index=as_index, group_keys=group_keys, squeeze=squeeze, observed=observed, 53 mutated=mutated) 54 else: 55 self.groupby_obj = groupby_obj 56 57 self.is_frame = isinstance(self.groupby_obj, DataFrameGroupBy) 58 59 if grouper_cache: 60 self.groupby_obj.grouper._cache = grouper_cache 61 if selection: 62 self.groupby_obj = self.groupby_obj[selection] 63 64 def __getitem__(self, item): 65 return GroupByWrapper( 66 self.obj, keys=self.keys, axis=self.axis, level=self.level, 67 grouper=self.groupby_obj.grouper, exclusions=self.exclusions, selection=item, 68 as_index=self.as_index, sort=self.sort, group_keys=self.group_keys, 69 squeeze=self.squeeze, observed=self.observed, mutated=self.mutated) 70 71 def __getattr__(self, item): 72 if item.startswith('_'): # pragma: no cover 73 return object.__getattribute__(self, item) 74 if item in getattr(self.obj, 'columns', ()): 75 return self.__getitem__(item) 76 return getattr(self.groupby_obj, item) 77 78 def __iter__(self): 79 return self.groupby_obj.__iter__() 80 81 @property 82 def empty(self): 83 return self.obj.empty 84 85 def to_tuple(self, truncate=False, pickle_function=False): 86 if self.selection and truncate: 87 if isinstance(self.selection, Iterable) and not isinstance(self.selection, str): 88 item_list = list(self.selection) 89 else: 90 item_list = [self.selection] 91 item_set = set(item_list) 92 93 if isinstance(self.keys, list): 94 sel_keys = self.keys 95 elif self.keys in self.obj.columns: 96 sel_keys = [self.keys] 97 else: 98 sel_keys = [] 99 100 
all_items = item_list + [k for k in sel_keys or () if k not in item_set] 101 if set(all_items) == set(self.obj.columns): 102 obj = self.obj 103 else: 104 obj = self.obj[all_items] 105 else: 106 obj = self.obj 107 108 if pickle_function and callable(self.keys): 109 keys = cloudpickle.dumps(self.keys) 110 else: 111 keys = self.keys 112 113 return obj, keys, self.axis, self.level, self.exclusions, self.selection, \ 114 self.as_index, self.sort, self.group_keys, self.squeeze, self.observed, \ 115 self.mutated, getattr(self.groupby_obj.grouper, '_cache', dict()) 116 117 @classmethod 118 def from_tuple(cls, tp): 119 obj, keys, axis, level, exclusions, selection, as_index, sort, group_keys, squeeze, \ 120 observed, mutated, grouper_cache = tp 121 122 if isinstance(keys, (bytes, bytearray)): 123 keys = cloudpickle.loads(keys) 124 125 return cls(obj, keys=keys, axis=axis, level=level, exclusions=exclusions, selection=selection, 126 as_index=as_index, sort=sort, group_keys=group_keys, squeeze=squeeze, observed=observed, 127 mutated=mutated, grouper_cache=grouper_cache) 128 129 130 def wrapped_groupby(obj, by=None, axis=0, level=None, as_index=True, sort=True, group_keys=True, 131 squeeze=False, observed=False): 132 groupby_obj = obj.groupby(by=by, axis=axis, level=level, as_index=as_index, sort=sort, 133 group_keys=group_keys, squeeze=squeeze, observed=observed) 134 return GroupByWrapper(obj, groupby_obj=groupby_obj) 135 [end of mars/lib/groupby_wrapper.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mars/lib/groupby_wrapper.py b/mars/lib/groupby_wrapper.py --- a/mars/lib/groupby_wrapper.py +++ b/mars/lib/groupby_wrapper.py @@ -54,13 +54,13 @@ else: self.groupby_obj = groupby_obj - self.is_frame = isinstance(self.groupby_obj, DataFrameGroupBy) - if grouper_cache: self.groupby_obj.grouper._cache = grouper_cache if selection: self.groupby_obj = self.groupby_obj[selection] + self.is_frame = isinstance(self.groupby_obj, DataFrameGroupBy) + def __getitem__(self, item): return GroupByWrapper( self.obj, keys=self.keys, axis=self.axis, level=self.level, @@ -82,6 +82,13 @@ def empty(self): return self.obj.empty + @property + def shape(self): + shape = list(self.groupby_obj.obj.shape) + if self.is_frame and self.selection: + shape[1] = len(self.selection) + return tuple(shape) + def to_tuple(self, truncate=False, pickle_function=False): if self.selection and truncate: if isinstance(self.selection, Iterable) and not isinstance(self.selection, str):
{"golden_diff": "diff --git a/mars/lib/groupby_wrapper.py b/mars/lib/groupby_wrapper.py\n--- a/mars/lib/groupby_wrapper.py\n+++ b/mars/lib/groupby_wrapper.py\n@@ -54,13 +54,13 @@\n else:\n self.groupby_obj = groupby_obj\n \n- self.is_frame = isinstance(self.groupby_obj, DataFrameGroupBy)\n-\n if grouper_cache:\n self.groupby_obj.grouper._cache = grouper_cache\n if selection:\n self.groupby_obj = self.groupby_obj[selection]\n \n+ self.is_frame = isinstance(self.groupby_obj, DataFrameGroupBy)\n+\n def __getitem__(self, item):\n return GroupByWrapper(\n self.obj, keys=self.keys, axis=self.axis, level=self.level,\n@@ -82,6 +82,13 @@\n def empty(self):\n return self.obj.empty\n \n+ @property\n+ def shape(self):\n+ shape = list(self.groupby_obj.obj.shape)\n+ if self.is_frame and self.selection:\n+ shape[1] = len(self.selection)\n+ return tuple(shape)\n+\n def to_tuple(self, truncate=False, pickle_function=False):\n if self.selection and truncate:\n if isinstance(self.selection, Iterable) and not isinstance(self.selection, str):\n", "issue": "[BUG] Execute groupby failed\n<!--\r\nThank you for your contribution!\r\n\r\nPlease review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.\r\n-->\r\n\r\n**Describe the bug**\r\n\r\n`df.groupby().execute()` may fail.\r\n\r\n**To Reproduce**\r\nTo help us reproducing this bug, please provide information below:\r\n1. Your Python version\r\n2. The version of Mars you use\r\n3. Versions of crucial packages, such as numpy, scipy and protobuf\r\n4. Full stack of the error.\r\n5. Minimized code to reproduce the error.\r\n\r\n```\r\nIn [1]: import pandas as pd; import numpy as np \r\n\r\nIn [2]: df = pd.DataFrame(np.random.rand(4, 3), index=np.arange(5, 1, -1)) \r\n\r\nIn [4]: import mars.dataframe as md \r\n\r\nIn [5]: mdf = md.DataFrame(df) \r\n\r\nIn [6]: mdf.groupby(0).execute() \r\n---------------------------------------------------------------------------\r\nAttributeError Traceback (most recent call last)\r\n<ipython-input-6-491b51043e08> in <module>\r\n----> 1 mdf.groupby(0).execute()\r\n\r\n~/Workspace/mars/mars/core.py in execute(self, session, **kw)\r\n 426 if session is None:\r\n 427 session = Session.default_or_local()\r\n--> 428 return session.run(self, **kw)\r\n 429 \r\n 430 def fetch(self, session=None, **kw):\r\n\r\n~/Workspace/mars/mars/session.py in run(self, *tileables, **kw)\r\n 181 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t\r\n 182 for t in tileables)\r\n--> 183 result = self._sess.run(*tileables, **kw)\r\n 184 \r\n 185 for t in tileables:\r\n\r\n~/Workspace/mars/mars/session.py in run(self, *tileables, **kw)\r\n 88 # set number of running cores\r\n 89 self.context.set_ncores(kw['n_parallel'])\r\n---> 90 res = self._executor.execute_tileables(tileables, **kw)\r\n 91 return res\r\n 92 \r\n\r\n~/Workspace/mars/mars/utils.py in _wrapped(*args, **kwargs)\r\n 380 _kernel_mode.eager = False\r\n 381 _kernel_mode.eager_count = enter_eager_count + 1\r\n--> 382 return func(*args, **kwargs)\r\n 383 finally:\r\n 384 _kernel_mode.eager_count -= 1\r\n\r\n~/Workspace/mars/mars/utils.py in inner(*args, **kwargs)\r\n 468 def inner(*args, **kwargs):\r\n 469 with build_mode():\r\n--> 470 return func(*args, **kwargs)\r\n 471 return inner\r\n 472 \r\n\r\n~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose)\r\n 828 # update shape of tileable and its chunks whatever it's successful or not\r\n 829 
self._update_tileable_and_chunk_shape(\r\n--> 830 tileable_graph, chunk_result, chunk_graph_builder.interrupted_ops)\r\n 831 if chunk_graph_builder.done:\r\n 832 if len(intermediate_result_keys) > 0:\r\n\r\n~/Workspace/mars/mars/executor.py in _update_tileable_and_chunk_shape(self, tileable_graph, chunk_result, failed_ops)\r\n 726 continue\r\n 727 for c in tiled_n.chunks:\r\n--> 728 c.data._shape = chunk_result[c.key].shape\r\n 729 new_nsplits = self.get_tileable_nsplits(n, chunk_result=chunk_result)\r\n 730 for node in (n, tiled_n):\r\n\r\n~/Workspace/mars/mars/lib/groupby_wrapper.py in __getattr__(self, item)\r\n 74 if item in getattr(self.obj, 'columns', ()):\r\n 75 return self.__getitem__(item)\r\n---> 76 return getattr(self.groupby_obj, item)\r\n 77 \r\n 78 def __iter__(self):\r\n\r\n~/miniconda3/lib/python3.7/site-packages/pandas/core/groupby/groupby.py in __getattr__(self, attr)\r\n 578 \r\n 579 raise AttributeError(\r\n--> 580 f\"'{type(self).__name__}' object has no attribute '{attr}'\"\r\n 581 )\r\n 582 \r\n\r\nAttributeError: 'DataFrameGroupBy' object has no attribute 'shape'\r\n```\r\n\r\n\n", "before_files": [{"content": "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections.abc import Iterable\n\nimport cloudpickle\nfrom pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy\n\n\nclass GroupByWrapper:\n def __init__(self, obj, groupby_obj=None, keys=None, axis=0, level=None, grouper=None,\n exclusions=None, selection=None, as_index=True, sort=True,\n group_keys=True, squeeze=False, observed=False, mutated=False,\n grouper_cache=None):\n\n def fill_value(v, key):\n return v if v is not None or groupby_obj is None else getattr(groupby_obj, key)\n\n self.obj = obj\n self.keys = fill_value(keys, 'keys')\n self.axis = fill_value(axis, 'axis')\n self.level = fill_value(level, 'level')\n self.exclusions = fill_value(exclusions, 'exclusions')\n self.selection = selection\n self.as_index = fill_value(as_index, 'as_index')\n self.sort = fill_value(sort, 'sort')\n self.group_keys = fill_value(group_keys, 'group_keys')\n self.squeeze = fill_value(squeeze, 'squeeze')\n self.observed = fill_value(observed, 'observed')\n self.mutated = fill_value(mutated, 'mutated')\n\n if groupby_obj is None:\n if obj.ndim == 2:\n self.groupby_obj = DataFrameGroupBy(\n obj, keys=keys, axis=axis, level=level, grouper=grouper, exclusions=exclusions,\n as_index=as_index, group_keys=group_keys, squeeze=squeeze, observed=observed,\n mutated=mutated)\n else:\n self.groupby_obj = SeriesGroupBy(\n obj, keys=keys, axis=axis, level=level, grouper=grouper, exclusions=exclusions,\n as_index=as_index, group_keys=group_keys, squeeze=squeeze, observed=observed,\n mutated=mutated)\n else:\n self.groupby_obj = groupby_obj\n\n self.is_frame = isinstance(self.groupby_obj, DataFrameGroupBy)\n\n if grouper_cache:\n self.groupby_obj.grouper._cache = grouper_cache\n if selection:\n self.groupby_obj = self.groupby_obj[selection]\n\n def __getitem__(self, item):\n 
return GroupByWrapper(\n self.obj, keys=self.keys, axis=self.axis, level=self.level,\n grouper=self.groupby_obj.grouper, exclusions=self.exclusions, selection=item,\n as_index=self.as_index, sort=self.sort, group_keys=self.group_keys,\n squeeze=self.squeeze, observed=self.observed, mutated=self.mutated)\n\n def __getattr__(self, item):\n if item.startswith('_'): # pragma: no cover\n return object.__getattribute__(self, item)\n if item in getattr(self.obj, 'columns', ()):\n return self.__getitem__(item)\n return getattr(self.groupby_obj, item)\n\n def __iter__(self):\n return self.groupby_obj.__iter__()\n\n @property\n def empty(self):\n return self.obj.empty\n\n def to_tuple(self, truncate=False, pickle_function=False):\n if self.selection and truncate:\n if isinstance(self.selection, Iterable) and not isinstance(self.selection, str):\n item_list = list(self.selection)\n else:\n item_list = [self.selection]\n item_set = set(item_list)\n\n if isinstance(self.keys, list):\n sel_keys = self.keys\n elif self.keys in self.obj.columns:\n sel_keys = [self.keys]\n else:\n sel_keys = []\n\n all_items = item_list + [k for k in sel_keys or () if k not in item_set]\n if set(all_items) == set(self.obj.columns):\n obj = self.obj\n else:\n obj = self.obj[all_items]\n else:\n obj = self.obj\n\n if pickle_function and callable(self.keys):\n keys = cloudpickle.dumps(self.keys)\n else:\n keys = self.keys\n\n return obj, keys, self.axis, self.level, self.exclusions, self.selection, \\\n self.as_index, self.sort, self.group_keys, self.squeeze, self.observed, \\\n self.mutated, getattr(self.groupby_obj.grouper, '_cache', dict())\n\n @classmethod\n def from_tuple(cls, tp):\n obj, keys, axis, level, exclusions, selection, as_index, sort, group_keys, squeeze, \\\n observed, mutated, grouper_cache = tp\n\n if isinstance(keys, (bytes, bytearray)):\n keys = cloudpickle.loads(keys)\n\n return cls(obj, keys=keys, axis=axis, level=level, exclusions=exclusions, selection=selection,\n as_index=as_index, sort=sort, group_keys=group_keys, squeeze=squeeze, observed=observed,\n mutated=mutated, grouper_cache=grouper_cache)\n\n\ndef wrapped_groupby(obj, by=None, axis=0, level=None, as_index=True, sort=True, group_keys=True,\n squeeze=False, observed=False):\n groupby_obj = obj.groupby(by=by, axis=axis, level=level, as_index=as_index, sort=sort,\n group_keys=group_keys, squeeze=squeeze, observed=observed)\n return GroupByWrapper(obj, groupby_obj=groupby_obj)\n", "path": "mars/lib/groupby_wrapper.py"}]}
3,192
275
gh_patches_debug_31922
rasdani/github-patches
git_diff
scrapy__scrapy-879
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Enabled extensions, middlewares, pipelines I found that this information isn't being printed anymore. The responsible of this bug is [this line](https://github.com/scrapy/scrapy/pull/816/files#diff-fee03a44ad4de98d9361d89947c8aba3R83), seems that `spider` is `None` on `eventDict` at the moment the components are instantiated. I'm not sure how to fix it because I 'm not quite sure what it is attempting to block. /cc @curita </issue> <code> [start of scrapy/crawler.py] 1 import six 2 import signal 3 import warnings 4 5 from twisted.internet import reactor, defer 6 7 from scrapy.core.engine import ExecutionEngine 8 from scrapy.resolver import CachingThreadedResolver 9 from scrapy.extension import ExtensionManager 10 from scrapy.signalmanager import SignalManager 11 from scrapy.exceptions import ScrapyDeprecationWarning 12 from scrapy.utils.ossignal import install_shutdown_handlers, signal_names 13 from scrapy.utils.misc import load_object 14 from scrapy import log, signals 15 16 17 class Crawler(object): 18 19 def __init__(self, spidercls, settings): 20 self.spidercls = spidercls 21 self.settings = settings 22 self.signals = SignalManager(self) 23 self.stats = load_object(self.settings['STATS_CLASS'])(self) 24 lf_cls = load_object(self.settings['LOG_FORMATTER']) 25 self.logformatter = lf_cls.from_crawler(self) 26 self.extensions = ExtensionManager.from_crawler(self) 27 28 self.crawling = False 29 self.spider = None 30 self.engine = None 31 32 @property 33 def spiders(self): 34 if not hasattr(self, '_spiders'): 35 warnings.warn("Crawler.spiders is deprecated, use " 36 "CrawlerRunner.spiders or instantiate " 37 "scrapy.spidermanager.SpiderManager with your " 38 "settings.", 39 category=ScrapyDeprecationWarning, stacklevel=2) 40 spman_cls = load_object(self.settings['SPIDER_MANAGER_CLASS']) 41 self._spiders = spman_cls.from_settings(self.settings) 42 return self._spiders 43 44 @defer.inlineCallbacks 45 def crawl(self, *args, **kwargs): 46 assert not self.crawling, "Crawling already taking place" 47 self.crawling = True 48 49 try: 50 self.spider = self._create_spider(*args, **kwargs) 51 self.engine = self._create_engine() 52 start_requests = iter(self.spider.start_requests()) 53 yield self.engine.open_spider(self.spider, start_requests) 54 yield defer.maybeDeferred(self.engine.start) 55 except Exception: 56 self.crawling = False 57 raise 58 59 def _create_spider(self, *args, **kwargs): 60 return self.spidercls.from_crawler(self, *args, **kwargs) 61 62 def _create_engine(self): 63 return ExecutionEngine(self, lambda _: self.stop()) 64 65 @defer.inlineCallbacks 66 def stop(self): 67 if self.crawling: 68 self.crawling = False 69 yield defer.maybeDeferred(self.engine.stop) 70 71 72 class CrawlerRunner(object): 73 74 def __init__(self, settings): 75 self.settings = settings 76 smcls = load_object(settings['SPIDER_MANAGER_CLASS']) 77 self.spiders = smcls.from_settings(settings.frozencopy()) 78 self.crawlers = set() 79 self.crawl_deferreds = set() 80 81 def crawl(self, spidercls, *args, **kwargs): 82 crawler = self._create_logged_crawler(spidercls) 83 self.crawlers.add(crawler) 84 85 d = crawler.crawl(*args, **kwargs) 86 self.crawl_deferreds.add(d) 87 return d 88 89 def _create_logged_crawler(self, spidercls): 90 crawler = self._create_crawler(spidercls) 91 log_observer = log.start_from_crawler(crawler) 92 if log_observer: 93 crawler.signals.connect(log_observer.stop, signals.engine_stopped) 94 
return crawler 95 96 def _create_crawler(self, spidercls): 97 if isinstance(spidercls, six.string_types): 98 spidercls = self.spiders.load(spidercls) 99 100 crawler_settings = self.settings.copy() 101 spidercls.update_settings(crawler_settings) 102 crawler_settings.freeze() 103 104 crawler = Crawler(spidercls, crawler_settings) 105 return crawler 106 107 def stop(self): 108 return defer.DeferredList(c.stop() for c in self.crawlers) 109 110 111 class CrawlerProcess(CrawlerRunner): 112 """A class to run multiple scrapy crawlers in a process simultaneously""" 113 114 def __init__(self, settings): 115 super(CrawlerProcess, self).__init__(settings) 116 install_shutdown_handlers(self._signal_shutdown) 117 self.stopping = False 118 119 def _signal_shutdown(self, signum, _): 120 install_shutdown_handlers(self._signal_kill) 121 signame = signal_names[signum] 122 log.msg(format="Received %(signame)s, shutting down gracefully. Send again to force ", 123 level=log.INFO, signame=signame) 124 reactor.callFromThread(self.stop) 125 126 def _signal_kill(self, signum, _): 127 install_shutdown_handlers(signal.SIG_IGN) 128 signame = signal_names[signum] 129 log.msg(format='Received %(signame)s twice, forcing unclean shutdown', 130 level=log.INFO, signame=signame) 131 self._stop_logging() 132 reactor.callFromThread(self._stop_reactor) 133 134 def start(self, stop_after_crawl=True, start_reactor=True): 135 self.log_observer = log.start_from_settings(self.settings) 136 log.scrapy_info(self.settings) 137 if start_reactor: 138 self._start_reactor(stop_after_crawl) 139 140 def _start_reactor(self, stop_after_crawl=True): 141 if stop_after_crawl: 142 d = defer.DeferredList(self.crawl_deferreds) 143 if d.called: 144 # Don't start the reactor if the deferreds are already fired 145 return 146 d.addBoth(lambda _: self._stop_reactor()) 147 if self.settings.getbool('DNSCACHE_ENABLED'): 148 reactor.installResolver(CachingThreadedResolver(reactor)) 149 reactor.addSystemEventTrigger('before', 'shutdown', self.stop) 150 reactor.run(installSignalHandlers=False) # blocking call 151 152 def _stop_logging(self): 153 self.log_observer.stop() 154 155 def _stop_reactor(self, _=None): 156 try: 157 reactor.stop() 158 except RuntimeError: # raised if already stopped or in shutdown stage 159 pass 160 [end of scrapy/crawler.py] [start of scrapy/commands/shell.py] 1 """ 2 Scrapy Shell 3 4 See documentation in docs/topics/shell.rst 5 """ 6 7 from threading import Thread 8 9 from scrapy.command import ScrapyCommand 10 from scrapy.shell import Shell 11 from scrapy.http import Request 12 from scrapy import log 13 from scrapy.utils.spider import spidercls_for_request, DefaultSpider 14 15 16 class Command(ScrapyCommand): 17 18 requires_project = False 19 default_settings = {'KEEP_ALIVE': True, 'LOGSTATS_INTERVAL': 0} 20 21 def syntax(self): 22 return "[url|file]" 23 24 def short_desc(self): 25 return "Interactive scraping console" 26 27 def long_desc(self): 28 return "Interactive console for scraping the given url" 29 30 def add_options(self, parser): 31 ScrapyCommand.add_options(self, parser) 32 parser.add_option("-c", dest="code", 33 help="evaluate the code in the shell, print the result and exit") 34 parser.add_option("--spider", dest="spider", 35 help="use this spider") 36 37 def update_vars(self, vars): 38 """You can use this function to update the Scrapy objects that will be 39 available in the shell 40 """ 41 pass 42 43 def run(self, args, opts): 44 url = args[0] if args else None 45 spiders = self.crawler_process.spiders 46 47 spidercls 
= DefaultSpider 48 if opts.spider: 49 spidercls = spiders.load(opts.spider) 50 elif url: 51 spidercls = spidercls_for_request(spiders, Request(url), 52 spidercls, log_multiple=True) 53 54 # The crawler is created this way since the Shell manually handles the 55 # crawling engine, so the set up in the crawl method won't work 56 crawler = self.crawler_process._create_logged_crawler(spidercls) 57 # The Shell class needs a persistent engine in the crawler 58 crawler.engine = crawler._create_engine() 59 crawler.engine.start() 60 61 self.crawler_process.start(start_reactor=False) 62 self._start_crawler_thread() 63 64 shell = Shell(crawler, update_vars=self.update_vars, code=opts.code) 65 shell.start(url=url) 66 67 def _start_crawler_thread(self): 68 t = Thread(target=self.crawler_process._start_reactor, 69 kwargs={'stop_after_crawl': False}) 70 t.daemon = True 71 t.start() 72 [end of scrapy/commands/shell.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/commands/shell.py b/scrapy/commands/shell.py --- a/scrapy/commands/shell.py +++ b/scrapy/commands/shell.py @@ -58,14 +58,13 @@ crawler.engine = crawler._create_engine() crawler.engine.start() - self.crawler_process.start(start_reactor=False) self._start_crawler_thread() shell = Shell(crawler, update_vars=self.update_vars, code=opts.code) shell.start(url=url) def _start_crawler_thread(self): - t = Thread(target=self.crawler_process._start_reactor, + t = Thread(target=self.crawler_process.start, kwargs={'stop_after_crawl': False}) t.daemon = True t.start() diff --git a/scrapy/crawler.py b/scrapy/crawler.py --- a/scrapy/crawler.py +++ b/scrapy/crawler.py @@ -115,6 +115,8 @@ super(CrawlerProcess, self).__init__(settings) install_shutdown_handlers(self._signal_shutdown) self.stopping = False + self.log_observer = log.start_from_settings(self.settings) + log.scrapy_info(settings) def _signal_shutdown(self, signum, _): install_shutdown_handlers(self._signal_kill) @@ -131,13 +133,7 @@ self._stop_logging() reactor.callFromThread(self._stop_reactor) - def start(self, stop_after_crawl=True, start_reactor=True): - self.log_observer = log.start_from_settings(self.settings) - log.scrapy_info(self.settings) - if start_reactor: - self._start_reactor(stop_after_crawl) - - def _start_reactor(self, stop_after_crawl=True): + def start(self, stop_after_crawl=True): if stop_after_crawl: d = defer.DeferredList(self.crawl_deferreds) if d.called:
{"golden_diff": "diff --git a/scrapy/commands/shell.py b/scrapy/commands/shell.py\n--- a/scrapy/commands/shell.py\n+++ b/scrapy/commands/shell.py\n@@ -58,14 +58,13 @@\n crawler.engine = crawler._create_engine()\n crawler.engine.start()\n \n- self.crawler_process.start(start_reactor=False)\n self._start_crawler_thread()\n \n shell = Shell(crawler, update_vars=self.update_vars, code=opts.code)\n shell.start(url=url)\n \n def _start_crawler_thread(self):\n- t = Thread(target=self.crawler_process._start_reactor,\n+ t = Thread(target=self.crawler_process.start,\n kwargs={'stop_after_crawl': False})\n t.daemon = True\n t.start()\ndiff --git a/scrapy/crawler.py b/scrapy/crawler.py\n--- a/scrapy/crawler.py\n+++ b/scrapy/crawler.py\n@@ -115,6 +115,8 @@\n super(CrawlerProcess, self).__init__(settings)\n install_shutdown_handlers(self._signal_shutdown)\n self.stopping = False\n+ self.log_observer = log.start_from_settings(self.settings)\n+ log.scrapy_info(settings)\n \n def _signal_shutdown(self, signum, _):\n install_shutdown_handlers(self._signal_kill)\n@@ -131,13 +133,7 @@\n self._stop_logging()\n reactor.callFromThread(self._stop_reactor)\n \n- def start(self, stop_after_crawl=True, start_reactor=True):\n- self.log_observer = log.start_from_settings(self.settings)\n- log.scrapy_info(self.settings)\n- if start_reactor:\n- self._start_reactor(stop_after_crawl)\n-\n- def _start_reactor(self, stop_after_crawl=True):\n+ def start(self, stop_after_crawl=True):\n if stop_after_crawl:\n d = defer.DeferredList(self.crawl_deferreds)\n if d.called:\n", "issue": "Enabled extensions, middlewares, pipelines\nI found that this information isn't being printed anymore.\n\nThe responsible of this bug is [this line](https://github.com/scrapy/scrapy/pull/816/files#diff-fee03a44ad4de98d9361d89947c8aba3R83), seems that `spider` is `None` on `eventDict` at the moment the components are instantiated.\n\nI'm not sure how to fix it because I 'm not quite sure what it is attempting to block.\n\n/cc @curita\n\n", "before_files": [{"content": "import six\nimport signal\nimport warnings\n\nfrom twisted.internet import reactor, defer\n\nfrom scrapy.core.engine import ExecutionEngine\nfrom scrapy.resolver import CachingThreadedResolver\nfrom scrapy.extension import ExtensionManager\nfrom scrapy.signalmanager import SignalManager\nfrom scrapy.exceptions import ScrapyDeprecationWarning\nfrom scrapy.utils.ossignal import install_shutdown_handlers, signal_names\nfrom scrapy.utils.misc import load_object\nfrom scrapy import log, signals\n\n\nclass Crawler(object):\n\n def __init__(self, spidercls, settings):\n self.spidercls = spidercls\n self.settings = settings\n self.signals = SignalManager(self)\n self.stats = load_object(self.settings['STATS_CLASS'])(self)\n lf_cls = load_object(self.settings['LOG_FORMATTER'])\n self.logformatter = lf_cls.from_crawler(self)\n self.extensions = ExtensionManager.from_crawler(self)\n\n self.crawling = False\n self.spider = None\n self.engine = None\n\n @property\n def spiders(self):\n if not hasattr(self, '_spiders'):\n warnings.warn(\"Crawler.spiders is deprecated, use \"\n \"CrawlerRunner.spiders or instantiate \"\n \"scrapy.spidermanager.SpiderManager with your \"\n \"settings.\",\n category=ScrapyDeprecationWarning, stacklevel=2)\n spman_cls = load_object(self.settings['SPIDER_MANAGER_CLASS'])\n self._spiders = spman_cls.from_settings(self.settings)\n return self._spiders\n\n @defer.inlineCallbacks\n def crawl(self, *args, **kwargs):\n assert not self.crawling, \"Crawling already taking 
place\"\n self.crawling = True\n\n try:\n self.spider = self._create_spider(*args, **kwargs)\n self.engine = self._create_engine()\n start_requests = iter(self.spider.start_requests())\n yield self.engine.open_spider(self.spider, start_requests)\n yield defer.maybeDeferred(self.engine.start)\n except Exception:\n self.crawling = False\n raise\n\n def _create_spider(self, *args, **kwargs):\n return self.spidercls.from_crawler(self, *args, **kwargs)\n\n def _create_engine(self):\n return ExecutionEngine(self, lambda _: self.stop())\n\n @defer.inlineCallbacks\n def stop(self):\n if self.crawling:\n self.crawling = False\n yield defer.maybeDeferred(self.engine.stop)\n\n\nclass CrawlerRunner(object):\n\n def __init__(self, settings):\n self.settings = settings\n smcls = load_object(settings['SPIDER_MANAGER_CLASS'])\n self.spiders = smcls.from_settings(settings.frozencopy())\n self.crawlers = set()\n self.crawl_deferreds = set()\n\n def crawl(self, spidercls, *args, **kwargs):\n crawler = self._create_logged_crawler(spidercls)\n self.crawlers.add(crawler)\n\n d = crawler.crawl(*args, **kwargs)\n self.crawl_deferreds.add(d)\n return d\n\n def _create_logged_crawler(self, spidercls):\n crawler = self._create_crawler(spidercls)\n log_observer = log.start_from_crawler(crawler)\n if log_observer:\n crawler.signals.connect(log_observer.stop, signals.engine_stopped)\n return crawler\n\n def _create_crawler(self, spidercls):\n if isinstance(spidercls, six.string_types):\n spidercls = self.spiders.load(spidercls)\n\n crawler_settings = self.settings.copy()\n spidercls.update_settings(crawler_settings)\n crawler_settings.freeze()\n\n crawler = Crawler(spidercls, crawler_settings)\n return crawler\n\n def stop(self):\n return defer.DeferredList(c.stop() for c in self.crawlers)\n\n\nclass CrawlerProcess(CrawlerRunner):\n \"\"\"A class to run multiple scrapy crawlers in a process simultaneously\"\"\"\n\n def __init__(self, settings):\n super(CrawlerProcess, self).__init__(settings)\n install_shutdown_handlers(self._signal_shutdown)\n self.stopping = False\n\n def _signal_shutdown(self, signum, _):\n install_shutdown_handlers(self._signal_kill)\n signame = signal_names[signum]\n log.msg(format=\"Received %(signame)s, shutting down gracefully. 
Send again to force \",\n level=log.INFO, signame=signame)\n reactor.callFromThread(self.stop)\n\n def _signal_kill(self, signum, _):\n install_shutdown_handlers(signal.SIG_IGN)\n signame = signal_names[signum]\n log.msg(format='Received %(signame)s twice, forcing unclean shutdown',\n level=log.INFO, signame=signame)\n self._stop_logging()\n reactor.callFromThread(self._stop_reactor)\n\n def start(self, stop_after_crawl=True, start_reactor=True):\n self.log_observer = log.start_from_settings(self.settings)\n log.scrapy_info(self.settings)\n if start_reactor:\n self._start_reactor(stop_after_crawl)\n\n def _start_reactor(self, stop_after_crawl=True):\n if stop_after_crawl:\n d = defer.DeferredList(self.crawl_deferreds)\n if d.called:\n # Don't start the reactor if the deferreds are already fired\n return\n d.addBoth(lambda _: self._stop_reactor())\n if self.settings.getbool('DNSCACHE_ENABLED'):\n reactor.installResolver(CachingThreadedResolver(reactor))\n reactor.addSystemEventTrigger('before', 'shutdown', self.stop)\n reactor.run(installSignalHandlers=False) # blocking call\n\n def _stop_logging(self):\n self.log_observer.stop()\n\n def _stop_reactor(self, _=None):\n try:\n reactor.stop()\n except RuntimeError: # raised if already stopped or in shutdown stage\n pass\n", "path": "scrapy/crawler.py"}, {"content": "\"\"\"\nScrapy Shell\n\nSee documentation in docs/topics/shell.rst\n\"\"\"\n\nfrom threading import Thread\n\nfrom scrapy.command import ScrapyCommand\nfrom scrapy.shell import Shell\nfrom scrapy.http import Request\nfrom scrapy import log\nfrom scrapy.utils.spider import spidercls_for_request, DefaultSpider\n\n\nclass Command(ScrapyCommand):\n\n requires_project = False\n default_settings = {'KEEP_ALIVE': True, 'LOGSTATS_INTERVAL': 0}\n\n def syntax(self):\n return \"[url|file]\"\n\n def short_desc(self):\n return \"Interactive scraping console\"\n\n def long_desc(self):\n return \"Interactive console for scraping the given url\"\n\n def add_options(self, parser):\n ScrapyCommand.add_options(self, parser)\n parser.add_option(\"-c\", dest=\"code\",\n help=\"evaluate the code in the shell, print the result and exit\")\n parser.add_option(\"--spider\", dest=\"spider\",\n help=\"use this spider\")\n\n def update_vars(self, vars):\n \"\"\"You can use this function to update the Scrapy objects that will be\n available in the shell\n \"\"\"\n pass\n\n def run(self, args, opts):\n url = args[0] if args else None\n spiders = self.crawler_process.spiders\n\n spidercls = DefaultSpider\n if opts.spider:\n spidercls = spiders.load(opts.spider)\n elif url:\n spidercls = spidercls_for_request(spiders, Request(url),\n spidercls, log_multiple=True)\n\n # The crawler is created this way since the Shell manually handles the\n # crawling engine, so the set up in the crawl method won't work\n crawler = self.crawler_process._create_logged_crawler(spidercls)\n # The Shell class needs a persistent engine in the crawler\n crawler.engine = crawler._create_engine()\n crawler.engine.start()\n\n self.crawler_process.start(start_reactor=False)\n self._start_crawler_thread()\n\n shell = Shell(crawler, update_vars=self.update_vars, code=opts.code)\n shell.start(url=url)\n\n def _start_crawler_thread(self):\n t = Thread(target=self.crawler_process._start_reactor,\n kwargs={'stop_after_crawl': False})\n t.daemon = True\n t.start()\n", "path": "scrapy/commands/shell.py"}]}
2,944
434
gh_patches_debug_10022
rasdani/github-patches
git_diff
bokeh__bokeh-6724
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Set initial date in date picker in models/file/widgets This is needed to make image diff not fail when example is run on different days. </issue> <code> [start of examples/models/file/widgets.py] 1 from __future__ import print_function 2 3 #from datetime import date 4 5 from bokeh.document import Document 6 from bokeh.embed import file_html 7 from bokeh.resources import INLINE 8 from bokeh.util.browser import view 9 from bokeh.models import ColumnDataSource 10 from bokeh.models.layouts import Column, Row, WidgetBox 11 from bokeh.models.widgets import ( 12 Button, Toggle, Dropdown, 13 CheckboxGroup, RadioGroup, 14 CheckboxButtonGroup, RadioButtonGroup, 15 TextInput, AutocompleteInput, 16 Select, MultiSelect, 17 Slider, RangeSlider, #DateRangeSlider, 18 DatePicker, 19 Paragraph, Div, PreText, 20 Panel, Tabs, 21 DataTable, TableColumn, 22 StringFormatter, NumberFormatter, 23 StringEditor, IntEditor, NumberEditor, SelectEditor, 24 ) 25 from bokeh.plotting import figure 26 from bokeh.sampledata.iris import flowers 27 from bokeh.sampledata.autompg2 import autompg2 as mpg 28 29 button = Button(label="Button (disabled) - still has click event", button_type="primary", disabled=True) 30 toggle = Toggle(label="Toggle button", button_type="success") 31 32 menu = [("Item 1", "item_1_value"), ("Item 2", "item_2_value"), ("Item 3", "item_3_value")] 33 34 dropdown = Dropdown(label="Dropdown button", button_type="warning", menu=menu) 35 #dropdown_split = Dropdown(label="Split button", button_type="danger", menu=menu, default_value="default")) 36 37 checkbox_group = CheckboxGroup(labels=["Option 1", "Option 2", "Option 3"], active=[0, 1]) 38 radio_group = RadioGroup(labels=["Option 1", "Option 2", "Option 3"], active=0) 39 40 checkbox_button_group = CheckboxButtonGroup(labels=["Option 1", "Option 2", "Option 3"], active=[0, 1]) 41 radio_button_group = RadioButtonGroup(labels=["Option 1", "Option 2", "Option 3"], active=0) 42 43 text_input = TextInput(placeholder="Enter value ...") 44 45 completions = ["aaa", "aab", "aac", "baa", "caa"] 46 autocomplete_input = AutocompleteInput(placeholder="Enter value ...", completions=completions) 47 48 select = Select(options=["Option 1", "Option 2", "Option 3"]) 49 50 multi_select = MultiSelect(options=["Option %d" % (i+1) for i in range(16)], size=6) 51 52 slider = Slider(value=10, start=0, end=100, step=0.5) 53 54 range_slider = RangeSlider(value=[10, 90], start=0, end=100, step=0.5) 55 56 #date_range_slider = DateRangeSlider(value=(date(2016, 1, 1), date(2016, 12, 31))) 57 58 date_picker = DatePicker() 59 60 paragraph = Paragraph(text="some text") 61 62 div = Div(text="some <b>text</b>") 63 64 pre_text = PreText(text="some text") 65 66 def mk_tab(color): 67 plot = figure(plot_width=300, plot_height=300) 68 plot.scatter(flowers["petal_length"], flowers["petal_width"], color=color, fill_alpha=0.2, size=12) 69 return Panel(title="Tab 1: %s" % color.capitalize(), child=plot) 70 71 tabs = Tabs(tabs=[mk_tab("red"), mk_tab("green"), mk_tab("blue")]) 72 73 source = ColumnDataSource(data=mpg) 74 columns = [ 75 TableColumn(field="manufacturer", 76 title="Manufacturer", 77 editor=SelectEditor(options=sorted(mpg["manufacturer"].unique())), 78 formatter=StringFormatter(font_style="bold")), 79 TableColumn(field="model", 80 title="Model", 81 editor=StringEditor(completions=sorted(mpg["model"].unique()))), 82 TableColumn(field="displ", 83 title="Displacement", 84 
editor=NumberEditor(step=0.1), 85 formatter=NumberFormatter(format="0.0")), 86 TableColumn(field="year", 87 title="Year", 88 editor=IntEditor()), 89 TableColumn(field="cyl", 90 title="Cylinders", 91 editor=IntEditor()), 92 TableColumn(field="trans", 93 title="Transmission", 94 editor=SelectEditor(options=sorted(mpg["trans"].unique()))), 95 TableColumn(field="drv", 96 title="Drive", 97 editor=SelectEditor(options=sorted(mpg["drv"].unique()))), 98 TableColumn(field="class", 99 title="Class", 100 editor=SelectEditor(options=sorted(mpg["class"].unique()))), 101 TableColumn(field="cty", 102 title="City MPG", 103 editor=IntEditor()), 104 TableColumn(field="hwy", 105 title="Highway MPG", 106 editor=IntEditor()), 107 ] 108 table = DataTable(source=source, columns=columns, editable=True, width=800) 109 110 widgets = Column(children=[ 111 Row(children=[ 112 WidgetBox(children=[ 113 button, toggle, dropdown, #dropdown_split, 114 checkbox_group, radio_group, 115 checkbox_button_group, radio_button_group, 116 ]), 117 WidgetBox(children=[ 118 text_input, autocomplete_input, 119 select, multi_select, 120 slider, range_slider, #date_range_slider, 121 date_picker, 122 paragraph, div, pre_text, 123 ]), 124 WidgetBox(children=[ 125 tabs, 126 ], width=400), 127 ]), 128 WidgetBox(children=[table]), 129 ]) 130 131 132 doc = Document() 133 doc.add_root(widgets) 134 135 if __name__ == "__main__": 136 doc.validate() 137 filename = "widgets.html" 138 with open(filename, "w") as f: 139 f.write(file_html(doc, INLINE, "Widgets")) 140 print("Wrote %s" % filename) 141 view(filename) 142 [end of examples/models/file/widgets.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/models/file/widgets.py b/examples/models/file/widgets.py --- a/examples/models/file/widgets.py +++ b/examples/models/file/widgets.py @@ -1,6 +1,6 @@ from __future__ import print_function -#from datetime import date +from datetime import date from bokeh.document import Document from bokeh.embed import file_html @@ -55,7 +55,7 @@ #date_range_slider = DateRangeSlider(value=(date(2016, 1, 1), date(2016, 12, 31))) -date_picker = DatePicker() +date_picker = DatePicker(value=date(2017, 8, 1)) paragraph = Paragraph(text="some text")
{"golden_diff": "diff --git a/examples/models/file/widgets.py b/examples/models/file/widgets.py\n--- a/examples/models/file/widgets.py\n+++ b/examples/models/file/widgets.py\n@@ -1,6 +1,6 @@\n from __future__ import print_function\n \n-#from datetime import date\n+from datetime import date\n \n from bokeh.document import Document\n from bokeh.embed import file_html\n@@ -55,7 +55,7 @@\n \n #date_range_slider = DateRangeSlider(value=(date(2016, 1, 1), date(2016, 12, 31)))\n \n-date_picker = DatePicker()\n+date_picker = DatePicker(value=date(2017, 8, 1))\n \n paragraph = Paragraph(text=\"some text\")\n", "issue": "Set initial date in date picker in models/file/widgets\nThis is needed to make image diff not fail when example is run on different days.\r\n\n", "before_files": [{"content": "from __future__ import print_function\n\n#from datetime import date\n\nfrom bokeh.document import Document\nfrom bokeh.embed import file_html\nfrom bokeh.resources import INLINE\nfrom bokeh.util.browser import view\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.models.layouts import Column, Row, WidgetBox\nfrom bokeh.models.widgets import (\n Button, Toggle, Dropdown,\n CheckboxGroup, RadioGroup,\n CheckboxButtonGroup, RadioButtonGroup,\n TextInput, AutocompleteInput,\n Select, MultiSelect,\n Slider, RangeSlider, #DateRangeSlider,\n DatePicker,\n Paragraph, Div, PreText,\n Panel, Tabs,\n DataTable, TableColumn,\n StringFormatter, NumberFormatter,\n StringEditor, IntEditor, NumberEditor, SelectEditor,\n)\nfrom bokeh.plotting import figure\nfrom bokeh.sampledata.iris import flowers\nfrom bokeh.sampledata.autompg2 import autompg2 as mpg\n\nbutton = Button(label=\"Button (disabled) - still has click event\", button_type=\"primary\", disabled=True)\ntoggle = Toggle(label=\"Toggle button\", button_type=\"success\")\n\nmenu = [(\"Item 1\", \"item_1_value\"), (\"Item 2\", \"item_2_value\"), (\"Item 3\", \"item_3_value\")]\n\ndropdown = Dropdown(label=\"Dropdown button\", button_type=\"warning\", menu=menu)\n#dropdown_split = Dropdown(label=\"Split button\", button_type=\"danger\", menu=menu, default_value=\"default\"))\n\ncheckbox_group = CheckboxGroup(labels=[\"Option 1\", \"Option 2\", \"Option 3\"], active=[0, 1])\nradio_group = RadioGroup(labels=[\"Option 1\", \"Option 2\", \"Option 3\"], active=0)\n\ncheckbox_button_group = CheckboxButtonGroup(labels=[\"Option 1\", \"Option 2\", \"Option 3\"], active=[0, 1])\nradio_button_group = RadioButtonGroup(labels=[\"Option 1\", \"Option 2\", \"Option 3\"], active=0)\n\ntext_input = TextInput(placeholder=\"Enter value ...\")\n\ncompletions = [\"aaa\", \"aab\", \"aac\", \"baa\", \"caa\"]\nautocomplete_input = AutocompleteInput(placeholder=\"Enter value ...\", completions=completions)\n\nselect = Select(options=[\"Option 1\", \"Option 2\", \"Option 3\"])\n\nmulti_select = MultiSelect(options=[\"Option %d\" % (i+1) for i in range(16)], size=6)\n\nslider = Slider(value=10, start=0, end=100, step=0.5)\n\nrange_slider = RangeSlider(value=[10, 90], start=0, end=100, step=0.5)\n\n#date_range_slider = DateRangeSlider(value=(date(2016, 1, 1), date(2016, 12, 31)))\n\ndate_picker = DatePicker()\n\nparagraph = Paragraph(text=\"some text\")\n\ndiv = Div(text=\"some <b>text</b>\")\n\npre_text = PreText(text=\"some text\")\n\ndef mk_tab(color):\n plot = figure(plot_width=300, plot_height=300)\n plot.scatter(flowers[\"petal_length\"], flowers[\"petal_width\"], color=color, fill_alpha=0.2, size=12)\n return Panel(title=\"Tab 1: %s\" % color.capitalize(), child=plot)\n\ntabs = 
Tabs(tabs=[mk_tab(\"red\"), mk_tab(\"green\"), mk_tab(\"blue\")])\n\nsource = ColumnDataSource(data=mpg)\ncolumns = [\n TableColumn(field=\"manufacturer\",\n title=\"Manufacturer\",\n editor=SelectEditor(options=sorted(mpg[\"manufacturer\"].unique())),\n formatter=StringFormatter(font_style=\"bold\")),\n TableColumn(field=\"model\",\n title=\"Model\",\n editor=StringEditor(completions=sorted(mpg[\"model\"].unique()))),\n TableColumn(field=\"displ\",\n title=\"Displacement\",\n editor=NumberEditor(step=0.1),\n formatter=NumberFormatter(format=\"0.0\")),\n TableColumn(field=\"year\",\n title=\"Year\",\n editor=IntEditor()),\n TableColumn(field=\"cyl\",\n title=\"Cylinders\",\n editor=IntEditor()),\n TableColumn(field=\"trans\",\n title=\"Transmission\",\n editor=SelectEditor(options=sorted(mpg[\"trans\"].unique()))),\n TableColumn(field=\"drv\",\n title=\"Drive\",\n editor=SelectEditor(options=sorted(mpg[\"drv\"].unique()))),\n TableColumn(field=\"class\",\n title=\"Class\",\n editor=SelectEditor(options=sorted(mpg[\"class\"].unique()))),\n TableColumn(field=\"cty\",\n title=\"City MPG\",\n editor=IntEditor()),\n TableColumn(field=\"hwy\",\n title=\"Highway MPG\",\n editor=IntEditor()),\n]\ntable = DataTable(source=source, columns=columns, editable=True, width=800)\n\nwidgets = Column(children=[\n Row(children=[\n WidgetBox(children=[\n button, toggle, dropdown, #dropdown_split,\n checkbox_group, radio_group,\n checkbox_button_group, radio_button_group,\n ]),\n WidgetBox(children=[\n text_input, autocomplete_input,\n select, multi_select,\n slider, range_slider, #date_range_slider,\n date_picker,\n paragraph, div, pre_text,\n ]),\n WidgetBox(children=[\n tabs,\n ], width=400),\n ]),\n WidgetBox(children=[table]),\n])\n\n\ndoc = Document()\ndoc.add_root(widgets)\n\nif __name__ == \"__main__\":\n doc.validate()\n filename = \"widgets.html\"\n with open(filename, \"w\") as f:\n f.write(file_html(doc, INLINE, \"Widgets\"))\n print(\"Wrote %s\" % filename)\n view(filename)\n", "path": "examples/models/file/widgets.py"}]}
2,113
163
gh_patches_debug_17871
rasdani/github-patches
git_diff
fal-ai__dbt-fal-779
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support sqlserver Add support for MS sqlserver </issue> <code> [start of projects/adapter/src/dbt/adapters/fal_experimental/adapter_support.py] 1 import functools 2 from time import sleep 3 from typing import Any 4 5 import pandas as pd 6 import sqlalchemy 7 from contextlib import contextmanager 8 from dbt.adapters.base import BaseAdapter, BaseRelation, RelationType 9 from dbt.adapters.base.connections import AdapterResponse, Connection 10 from dbt.config import RuntimeConfig 11 from dbt.parser.manifest import MacroManifest, Manifest, ManifestLoader 12 13 from dbt.adapters import factory 14 15 _SQLALCHEMY_DIALECTS = { 16 "redshift": "redshift+psycopg2", 17 } 18 19 20 def _get_alchemy_engine(adapter: BaseAdapter, connection: Connection) -> Any: 21 # The following code heavily depends on the implementation 22 # details of the known adapters, hence it can't work for 23 # arbitrary ones. 24 adapter_type = adapter.type() 25 26 sqlalchemy_kwargs = {} 27 format_url = lambda url: url 28 if adapter_type == 'trino': 29 import dbt.adapters.fal_experimental.support.trino as support_trino 30 return support_trino.create_engine(adapter) 31 32 if adapter_type == "redshift": 33 # If the given adapter supports the DBAPI (PEP 249), we can 34 # use its connection directly for the engine. 35 sqlalchemy_kwargs["creator"] = lambda *args, **kwargs: connection.handle 36 url = _SQLALCHEMY_DIALECTS.get(adapter_type, adapter_type) + "://" 37 url = format_url(url) 38 else: 39 message = ( 40 f"dbt-fal does not support {adapter_type} adapter. ", 41 f"If you need {adapter_type} support, you can create an issue ", 42 "in our GitHub repository: https://github.com/fal-ai/fal. ", 43 "We will look into it ASAP." 44 ) 45 raise NotImplementedError(message) 46 47 return sqlalchemy.create_engine(url, **sqlalchemy_kwargs) 48 49 50 def drop_relation_if_it_exists(adapter: BaseAdapter, relation: BaseRelation) -> None: 51 if adapter.get_relation( 52 database=relation.database, 53 schema=relation.schema, 54 identifier=relation.identifier, 55 ): 56 adapter.drop_relation(relation) 57 58 59 def write_df_to_relation( 60 adapter: BaseAdapter, 61 relation: BaseRelation, 62 dataframe: pd.DataFrame, 63 *, 64 if_exists: str = "replace", 65 ) -> AdapterResponse: 66 """Generic version of the write_df_to_relation. Materialize the given 67 dataframe to the targeted relation on the adapter.""" 68 69 adapter_type = adapter.type() 70 71 if adapter_type == "snowflake": 72 import dbt.adapters.fal_experimental.support.snowflake as support_snowflake 73 74 return support_snowflake.write_df_to_relation(adapter, dataframe, relation) 75 76 elif adapter_type == "bigquery": 77 import dbt.adapters.fal_experimental.support.bigquery as support_bq 78 79 return support_bq.write_df_to_relation(adapter, dataframe, relation) 80 81 elif adapter_type == "duckdb": 82 import dbt.adapters.fal_experimental.support.duckdb as support_duckdb 83 84 return support_duckdb.write_df_to_relation(adapter, dataframe, relation) 85 86 elif adapter_type == "postgres": 87 import dbt.adapters.fal_experimental.support.postgres as support_postgres 88 89 return support_postgres.write_df_to_relation(adapter, dataframe, relation) 90 91 else: 92 with new_connection(adapter, "fal:write_df_to_relation") as connection: 93 # TODO: this should probably live in the materialization macro. 
94 temp_relation = relation.replace_path( 95 identifier=f"__dbt_fal_temp_{relation.identifier}" 96 ) 97 drop_relation_if_it_exists(adapter, temp_relation) 98 99 alchemy_engine = _get_alchemy_engine(adapter, connection) 100 101 # TODO: probably worth handling errors here an returning 102 # a proper adapter response. 103 rows_affected = dataframe.to_sql( 104 con=alchemy_engine, 105 name=temp_relation.identifier, 106 schema=temp_relation.schema, 107 if_exists=if_exists, 108 index=False, 109 ) 110 adapter.cache.add(temp_relation) 111 drop_relation_if_it_exists(adapter, relation) 112 adapter.rename_relation(temp_relation, relation) 113 adapter.commit_if_has_connection() 114 115 return AdapterResponse("OK", rows_affected=rows_affected) 116 117 118 def read_relation_as_df(adapter: BaseAdapter, relation: BaseRelation) -> pd.DataFrame: 119 """Generic version of the read_df_from_relation.""" 120 121 adapter_type = adapter.type() 122 123 if adapter_type == "snowflake": 124 import dbt.adapters.fal_experimental.support.snowflake as support_snowflake 125 126 return support_snowflake.read_relation_as_df(adapter, relation) 127 128 elif adapter_type == "bigquery": 129 import dbt.adapters.fal_experimental.support.bigquery as support_bq 130 131 return support_bq.read_relation_as_df(adapter, relation) 132 133 elif adapter_type == "duckdb": 134 import dbt.adapters.fal_experimental.support.duckdb as support_duckdb 135 136 return support_duckdb.read_relation_as_df(adapter, relation) 137 138 elif adapter_type == "postgres": 139 import dbt.adapters.fal_experimental.support.postgres as support_postgres 140 141 return support_postgres.read_relation_as_df(adapter, relation) 142 143 else: 144 with new_connection(adapter, "fal:read_relation_as_df") as connection: 145 alchemy_engine = _get_alchemy_engine(adapter, connection) 146 return pd.read_sql_table( 147 con=alchemy_engine, 148 table_name=relation.identifier, 149 schema=relation.schema, 150 ) 151 152 153 def prepare_for_adapter(adapter: BaseAdapter, function: Any) -> Any: 154 """Prepare the given function to be used with string-like inputs 155 (for relations) on the given adapter.""" 156 157 @functools.wraps(function) 158 def wrapped(quoted_relation: str, *args, **kwargs) -> Any: 159 relation = adapter.Relation.create( 160 *quoted_relation.split("."), type=RelationType.Table 161 ) 162 return function(adapter, relation, *args, **kwargs) 163 164 return wrapped 165 166 167 def reconstruct_adapter(config: RuntimeConfig, manifest: Manifest, macro_manifest: MacroManifest) -> BaseAdapter: 168 from dbt.tracking import do_not_track 169 170 # Prepare the DBT to not to track us. 171 do_not_track() 172 173 # Prepare the plugin loading system to handle the adapter. 
174 factory.load_plugin(config.credentials.type) 175 factory.load_plugin(config.python_adapter_credentials.type) 176 factory.register_adapter(config) 177 178 # Initialize the adapter 179 db_adapter = factory.get_adapter(config) 180 db_adapter._macro_manifest_lazy = macro_manifest 181 reload_adapter_cache(db_adapter, manifest) 182 183 return db_adapter 184 185 186 def reload_adapter_cache(adapter: BaseAdapter, manifest: Manifest) -> None: 187 with new_connection(adapter, "fal:reload_adapter_cache"): 188 adapter.set_relations_cache(manifest, True) 189 190 191 @contextmanager 192 def new_connection(adapter: BaseAdapter, connection_name: str) -> Connection: 193 with adapter.connection_named(connection_name): 194 yield adapter.connections.get_thread_connection() 195 [end of projects/adapter/src/dbt/adapters/fal_experimental/adapter_support.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/projects/adapter/src/dbt/adapters/fal_experimental/adapter_support.py b/projects/adapter/src/dbt/adapters/fal_experimental/adapter_support.py --- a/projects/adapter/src/dbt/adapters/fal_experimental/adapter_support.py +++ b/projects/adapter/src/dbt/adapters/fal_experimental/adapter_support.py @@ -14,6 +14,7 @@ _SQLALCHEMY_DIALECTS = { "redshift": "redshift+psycopg2", + "sqlserver": "mssql+pyodbc", } @@ -35,6 +36,10 @@ sqlalchemy_kwargs["creator"] = lambda *args, **kwargs: connection.handle url = _SQLALCHEMY_DIALECTS.get(adapter_type, adapter_type) + "://" url = format_url(url) + elif adapter_type == "sqlserver": + sqlalchemy_kwargs["creator"] = lambda *args, **kwargs: connection.handle + url = _SQLALCHEMY_DIALECTS.get(adapter_type, adapter_type) + "://" + url = format_url(url) else: message = ( f"dbt-fal does not support {adapter_type} adapter. ",
{"golden_diff": "diff --git a/projects/adapter/src/dbt/adapters/fal_experimental/adapter_support.py b/projects/adapter/src/dbt/adapters/fal_experimental/adapter_support.py\n--- a/projects/adapter/src/dbt/adapters/fal_experimental/adapter_support.py\n+++ b/projects/adapter/src/dbt/adapters/fal_experimental/adapter_support.py\n@@ -14,6 +14,7 @@\n \n _SQLALCHEMY_DIALECTS = {\n \"redshift\": \"redshift+psycopg2\",\n+ \"sqlserver\": \"mssql+pyodbc\",\n }\n \n \n@@ -35,6 +36,10 @@\n sqlalchemy_kwargs[\"creator\"] = lambda *args, **kwargs: connection.handle\n url = _SQLALCHEMY_DIALECTS.get(adapter_type, adapter_type) + \"://\"\n url = format_url(url)\n+ elif adapter_type == \"sqlserver\":\n+ sqlalchemy_kwargs[\"creator\"] = lambda *args, **kwargs: connection.handle\n+ url = _SQLALCHEMY_DIALECTS.get(adapter_type, adapter_type) + \"://\"\n+ url = format_url(url)\n else:\n message = (\n f\"dbt-fal does not support {adapter_type} adapter. \",\n", "issue": "Support sqlserver\nAdd support for MS sqlserver\n", "before_files": [{"content": "import functools\nfrom time import sleep\nfrom typing import Any\n\nimport pandas as pd\nimport sqlalchemy\nfrom contextlib import contextmanager\nfrom dbt.adapters.base import BaseAdapter, BaseRelation, RelationType\nfrom dbt.adapters.base.connections import AdapterResponse, Connection\nfrom dbt.config import RuntimeConfig\nfrom dbt.parser.manifest import MacroManifest, Manifest, ManifestLoader\n\nfrom dbt.adapters import factory\n\n_SQLALCHEMY_DIALECTS = {\n \"redshift\": \"redshift+psycopg2\",\n}\n\n\ndef _get_alchemy_engine(adapter: BaseAdapter, connection: Connection) -> Any:\n # The following code heavily depends on the implementation\n # details of the known adapters, hence it can't work for\n # arbitrary ones.\n adapter_type = adapter.type()\n\n sqlalchemy_kwargs = {}\n format_url = lambda url: url\n if adapter_type == 'trino':\n import dbt.adapters.fal_experimental.support.trino as support_trino\n return support_trino.create_engine(adapter)\n\n if adapter_type == \"redshift\":\n # If the given adapter supports the DBAPI (PEP 249), we can\n # use its connection directly for the engine.\n sqlalchemy_kwargs[\"creator\"] = lambda *args, **kwargs: connection.handle\n url = _SQLALCHEMY_DIALECTS.get(adapter_type, adapter_type) + \"://\"\n url = format_url(url)\n else:\n message = (\n f\"dbt-fal does not support {adapter_type} adapter. \",\n f\"If you need {adapter_type} support, you can create an issue \",\n \"in our GitHub repository: https://github.com/fal-ai/fal. \",\n \"We will look into it ASAP.\"\n )\n raise NotImplementedError(message)\n\n return sqlalchemy.create_engine(url, **sqlalchemy_kwargs)\n\n\ndef drop_relation_if_it_exists(adapter: BaseAdapter, relation: BaseRelation) -> None:\n if adapter.get_relation(\n database=relation.database,\n schema=relation.schema,\n identifier=relation.identifier,\n ):\n adapter.drop_relation(relation)\n\n\ndef write_df_to_relation(\n adapter: BaseAdapter,\n relation: BaseRelation,\n dataframe: pd.DataFrame,\n *,\n if_exists: str = \"replace\",\n) -> AdapterResponse:\n \"\"\"Generic version of the write_df_to_relation. 
Materialize the given\n dataframe to the targeted relation on the adapter.\"\"\"\n\n adapter_type = adapter.type()\n\n if adapter_type == \"snowflake\":\n import dbt.adapters.fal_experimental.support.snowflake as support_snowflake\n\n return support_snowflake.write_df_to_relation(adapter, dataframe, relation)\n\n elif adapter_type == \"bigquery\":\n import dbt.adapters.fal_experimental.support.bigquery as support_bq\n\n return support_bq.write_df_to_relation(adapter, dataframe, relation)\n\n elif adapter_type == \"duckdb\":\n import dbt.adapters.fal_experimental.support.duckdb as support_duckdb\n\n return support_duckdb.write_df_to_relation(adapter, dataframe, relation)\n\n elif adapter_type == \"postgres\":\n import dbt.adapters.fal_experimental.support.postgres as support_postgres\n\n return support_postgres.write_df_to_relation(adapter, dataframe, relation)\n\n else:\n with new_connection(adapter, \"fal:write_df_to_relation\") as connection:\n # TODO: this should probably live in the materialization macro.\n temp_relation = relation.replace_path(\n identifier=f\"__dbt_fal_temp_{relation.identifier}\"\n )\n drop_relation_if_it_exists(adapter, temp_relation)\n\n alchemy_engine = _get_alchemy_engine(adapter, connection)\n\n # TODO: probably worth handling errors here an returning\n # a proper adapter response.\n rows_affected = dataframe.to_sql(\n con=alchemy_engine,\n name=temp_relation.identifier,\n schema=temp_relation.schema,\n if_exists=if_exists,\n index=False,\n )\n adapter.cache.add(temp_relation)\n drop_relation_if_it_exists(adapter, relation)\n adapter.rename_relation(temp_relation, relation)\n adapter.commit_if_has_connection()\n\n return AdapterResponse(\"OK\", rows_affected=rows_affected)\n\n\ndef read_relation_as_df(adapter: BaseAdapter, relation: BaseRelation) -> pd.DataFrame:\n \"\"\"Generic version of the read_df_from_relation.\"\"\"\n\n adapter_type = adapter.type()\n\n if adapter_type == \"snowflake\":\n import dbt.adapters.fal_experimental.support.snowflake as support_snowflake\n\n return support_snowflake.read_relation_as_df(adapter, relation)\n\n elif adapter_type == \"bigquery\":\n import dbt.adapters.fal_experimental.support.bigquery as support_bq\n\n return support_bq.read_relation_as_df(adapter, relation)\n\n elif adapter_type == \"duckdb\":\n import dbt.adapters.fal_experimental.support.duckdb as support_duckdb\n\n return support_duckdb.read_relation_as_df(adapter, relation)\n\n elif adapter_type == \"postgres\":\n import dbt.adapters.fal_experimental.support.postgres as support_postgres\n\n return support_postgres.read_relation_as_df(adapter, relation)\n\n else:\n with new_connection(adapter, \"fal:read_relation_as_df\") as connection:\n alchemy_engine = _get_alchemy_engine(adapter, connection)\n return pd.read_sql_table(\n con=alchemy_engine,\n table_name=relation.identifier,\n schema=relation.schema,\n )\n\n\ndef prepare_for_adapter(adapter: BaseAdapter, function: Any) -> Any:\n \"\"\"Prepare the given function to be used with string-like inputs\n (for relations) on the given adapter.\"\"\"\n\n @functools.wraps(function)\n def wrapped(quoted_relation: str, *args, **kwargs) -> Any:\n relation = adapter.Relation.create(\n *quoted_relation.split(\".\"), type=RelationType.Table\n )\n return function(adapter, relation, *args, **kwargs)\n\n return wrapped\n\n\ndef reconstruct_adapter(config: RuntimeConfig, manifest: Manifest, macro_manifest: MacroManifest) -> BaseAdapter:\n from dbt.tracking import do_not_track\n\n # Prepare the DBT to not to track us.\n 
do_not_track()\n\n # Prepare the plugin loading system to handle the adapter.\n factory.load_plugin(config.credentials.type)\n factory.load_plugin(config.python_adapter_credentials.type)\n factory.register_adapter(config)\n\n # Initialize the adapter\n db_adapter = factory.get_adapter(config)\n db_adapter._macro_manifest_lazy = macro_manifest\n reload_adapter_cache(db_adapter, manifest)\n\n return db_adapter\n\n\ndef reload_adapter_cache(adapter: BaseAdapter, manifest: Manifest) -> None:\n with new_connection(adapter, \"fal:reload_adapter_cache\"):\n adapter.set_relations_cache(manifest, True)\n\n\n@contextmanager\ndef new_connection(adapter: BaseAdapter, connection_name: str) -> Connection:\n with adapter.connection_named(connection_name):\n yield adapter.connections.get_thread_connection()\n", "path": "projects/adapter/src/dbt/adapters/fal_experimental/adapter_support.py"}]}
2,534
267
gh_patches_debug_5302
rasdani/github-patches
git_diff
searx__searx-2991
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Only a lower case "ip" displays the IP address When the feature is enabled to show a user's IP address when "ip" is entered into the search bar, it only does so when it is all lowercase. Querying "IP" does not return an IP. This seems like a bug, apologies if this was intended. Thanks </issue> <code> [start of searx/plugins/self_info.py] 1 ''' 2 searx is free software: you can redistribute it and/or modify 3 it under the terms of the GNU Affero General Public License as published by 4 the Free Software Foundation, either version 3 of the License, or 5 (at your option) any later version. 6 7 searx is distributed in the hope that it will be useful, 8 but WITHOUT ANY WARRANTY; without even the implied warranty of 9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 GNU Affero General Public License for more details. 11 12 You should have received a copy of the GNU Affero General Public License 13 along with searx. If not, see < http://www.gnu.org/licenses/ >. 14 15 (C) 2015 by Adam Tauber, <asciimoo@gmail.com> 16 ''' 17 from flask_babel import gettext 18 import re 19 name = gettext('Self Informations') 20 description = gettext('Displays your IP if the query is "ip" and your user agent if the query contains "user agent".') 21 default_on = True 22 23 24 # Self User Agent regex 25 p = re.compile('.*user[ -]agent.*', re.IGNORECASE) 26 27 28 # attach callback to the post search hook 29 # request: flask request object 30 # ctx: the whole local context of the pre search hook 31 def post_search(request, search): 32 if search.search_query.pageno > 1: 33 return True 34 if search.search_query.query == 'ip': 35 x_forwarded_for = request.headers.getlist("X-Forwarded-For") 36 if x_forwarded_for: 37 ip = x_forwarded_for[0] 38 else: 39 ip = request.remote_addr 40 search.result_container.answers['ip'] = {'answer': ip} 41 elif p.match(search.search_query.query): 42 ua = request.user_agent 43 search.result_container.answers['user-agent'] = {'answer': ua} 44 return True 45 [end of searx/plugins/self_info.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/searx/plugins/self_info.py b/searx/plugins/self_info.py --- a/searx/plugins/self_info.py +++ b/searx/plugins/self_info.py @@ -31,7 +31,7 @@ def post_search(request, search): if search.search_query.pageno > 1: return True - if search.search_query.query == 'ip': + if search.search_query.query.lower() == 'ip': x_forwarded_for = request.headers.getlist("X-Forwarded-For") if x_forwarded_for: ip = x_forwarded_for[0]
{"golden_diff": "diff --git a/searx/plugins/self_info.py b/searx/plugins/self_info.py\n--- a/searx/plugins/self_info.py\n+++ b/searx/plugins/self_info.py\n@@ -31,7 +31,7 @@\n def post_search(request, search):\n if search.search_query.pageno > 1:\n return True\n- if search.search_query.query == 'ip':\n+ if search.search_query.query.lower() == 'ip':\n x_forwarded_for = request.headers.getlist(\"X-Forwarded-For\")\n if x_forwarded_for:\n ip = x_forwarded_for[0]\n", "issue": "Only a lower case \"ip\" displays the IP address\nWhen the feature is enabled to show a user's IP address when \"ip\" is entered into the search bar, it only does so when it is all lowercase. Querying \"IP\" does not return an IP. This seems like a bug, apologies if this was intended.\r\n\r\nThanks\n", "before_files": [{"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2015 by Adam Tauber, <asciimoo@gmail.com>\n'''\nfrom flask_babel import gettext\nimport re\nname = gettext('Self Informations')\ndescription = gettext('Displays your IP if the query is \"ip\" and your user agent if the query contains \"user agent\".')\ndefault_on = True\n\n\n# Self User Agent regex\np = re.compile('.*user[ -]agent.*', re.IGNORECASE)\n\n\n# attach callback to the post search hook\n# request: flask request object\n# ctx: the whole local context of the pre search hook\ndef post_search(request, search):\n if search.search_query.pageno > 1:\n return True\n if search.search_query.query == 'ip':\n x_forwarded_for = request.headers.getlist(\"X-Forwarded-For\")\n if x_forwarded_for:\n ip = x_forwarded_for[0]\n else:\n ip = request.remote_addr\n search.result_container.answers['ip'] = {'answer': ip}\n elif p.match(search.search_query.query):\n ua = request.user_agent\n search.result_container.answers['user-agent'] = {'answer': ua}\n return True\n", "path": "searx/plugins/self_info.py"}]}
1,086
134
gh_patches_debug_24158
rasdani/github-patches
git_diff
pystiche__pystiche-9
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> propagate_guide() of Encoder raises a TypeError When running the replication of [Gatys et al. 2017](https://github.com/pmeier/pystiche/blob/3260b68ea8dd88de433777ad3750d7abe3894743/replication/gatys_et_al_2017.py#L254), the following error is raised: ``` TypeError: Unions cannot be used with isinstance(). ``` This points towards the [Encoder](https://github.com/pmeier/pystiche/blob/3260b68ea8dd88de433777ad3750d7abe3894743/pystiche/encoding/encoder.py#L12), specifically these `if` statements in the `propagate_guide()` method: https://github.com/pmeier/pystiche/blob/3260b68ea8dd88de433777ad3750d7abe3894743/pystiche/encoding/encoder.py#L50-L53 `PoolModule` and `ConvModule` are defined in `pystiche.typing`: https://github.com/pmeier/pystiche/blob/3260b68ea8dd88de433777ad3750d7abe3894743/pystiche/typing.py#L18-L23 </issue> <code> [start of pystiche/typing.py] 1 from typing import Union, Sequence 2 import torch 3 from torch import nn 4 5 __all__ = [ 6 "Numeric", 7 "TensorMeta", 8 "ConvModule", 9 "ConvModuleMeta", 10 "PoolModule", 11 "PoolModuleMeta", 12 ] 13 14 Numeric = Union[int, float] 15 16 TensorMeta = Union[torch.device, torch.dtype] 17 18 ConvModule = Union[nn.Conv1d, nn.Conv2d, nn.Conv2d] 19 ConvModuleMeta = Union[int, Sequence[int]] 20 21 PoolModule = Union[ 22 nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d, nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d 23 ] 24 PoolModuleMeta = Union[int, Sequence[int]] 25 [end of pystiche/typing.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pystiche/typing.py b/pystiche/typing.py --- a/pystiche/typing.py +++ b/pystiche/typing.py @@ -1,4 +1,4 @@ -from typing import Union, Sequence +from typing import Union, Any, Sequence import torch from torch import nn @@ -6,8 +6,10 @@ "Numeric", "TensorMeta", "ConvModule", + "is_conv_module", "ConvModuleMeta", "PoolModule", + "is_pool_module", "PoolModuleMeta", ] @@ -15,10 +17,32 @@ TensorMeta = Union[torch.device, torch.dtype] -ConvModule = Union[nn.Conv1d, nn.Conv2d, nn.Conv2d] +ConvModule = Union[nn.Conv1d, nn.Conv2d, nn.Conv3d] + + +def is_conv_module(x: Any) -> bool: + return isinstance(x, (nn.Conv1d, nn.Conv2d, nn.Conv3d)) + + ConvModuleMeta = Union[int, Sequence[int]] PoolModule = Union[ nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d, nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d ] + + +def is_pool_module(x: Any) -> bool: + return isinstance( + x, + ( + nn.AvgPool1d, + nn.AvgPool2d, + nn.AvgPool3d, + nn.MaxPool1d, + nn.MaxPool2d, + nn.MaxPool3d, + ), + ) + + PoolModuleMeta = Union[int, Sequence[int]]
{"golden_diff": "diff --git a/pystiche/typing.py b/pystiche/typing.py\n--- a/pystiche/typing.py\n+++ b/pystiche/typing.py\n@@ -1,4 +1,4 @@\n-from typing import Union, Sequence\n+from typing import Union, Any, Sequence\n import torch\n from torch import nn\n \n@@ -6,8 +6,10 @@\n \"Numeric\",\n \"TensorMeta\",\n \"ConvModule\",\n+ \"is_conv_module\",\n \"ConvModuleMeta\",\n \"PoolModule\",\n+ \"is_pool_module\",\n \"PoolModuleMeta\",\n ]\n \n@@ -15,10 +17,32 @@\n \n TensorMeta = Union[torch.device, torch.dtype]\n \n-ConvModule = Union[nn.Conv1d, nn.Conv2d, nn.Conv2d]\n+ConvModule = Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]\n+\n+\n+def is_conv_module(x: Any) -> bool:\n+ return isinstance(x, (nn.Conv1d, nn.Conv2d, nn.Conv3d))\n+\n+\n ConvModuleMeta = Union[int, Sequence[int]]\n \n PoolModule = Union[\n nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d, nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d\n ]\n+\n+\n+def is_pool_module(x: Any) -> bool:\n+ return isinstance(\n+ x,\n+ (\n+ nn.AvgPool1d,\n+ nn.AvgPool2d,\n+ nn.AvgPool3d,\n+ nn.MaxPool1d,\n+ nn.MaxPool2d,\n+ nn.MaxPool3d,\n+ ),\n+ )\n+\n+\n PoolModuleMeta = Union[int, Sequence[int]]\n", "issue": "propagate_guide() of Encoder raises a TypeError\nWhen running the replication of [Gatys et al. 2017](https://github.com/pmeier/pystiche/blob/3260b68ea8dd88de433777ad3750d7abe3894743/replication/gatys_et_al_2017.py#L254), the following error is raised:\r\n\r\n```\r\nTypeError: Unions cannot be used with isinstance().\r\n```\r\n\r\nThis points towards the [Encoder](https://github.com/pmeier/pystiche/blob/3260b68ea8dd88de433777ad3750d7abe3894743/pystiche/encoding/encoder.py#L12), specifically these `if` statements in the `propagate_guide()` method:\r\n\r\nhttps://github.com/pmeier/pystiche/blob/3260b68ea8dd88de433777ad3750d7abe3894743/pystiche/encoding/encoder.py#L50-L53\r\n\r\n`PoolModule` and `ConvModule` are defined in `pystiche.typing`:\r\n\r\nhttps://github.com/pmeier/pystiche/blob/3260b68ea8dd88de433777ad3750d7abe3894743/pystiche/typing.py#L18-L23\r\n\n", "before_files": [{"content": "from typing import Union, Sequence\nimport torch\nfrom torch import nn\n\n__all__ = [\n \"Numeric\",\n \"TensorMeta\",\n \"ConvModule\",\n \"ConvModuleMeta\",\n \"PoolModule\",\n \"PoolModuleMeta\",\n]\n\nNumeric = Union[int, float]\n\nTensorMeta = Union[torch.device, torch.dtype]\n\nConvModule = Union[nn.Conv1d, nn.Conv2d, nn.Conv2d]\nConvModuleMeta = Union[int, Sequence[int]]\n\nPoolModule = Union[\n nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d, nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d\n]\nPoolModuleMeta = Union[int, Sequence[int]]\n", "path": "pystiche/typing.py"}]}
1,078
388
gh_patches_debug_21080
rasdani/github-patches
git_diff
conda__conda-6131
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ValidationError: invalid value for timestamp MODERATOR EDIT: This issue relates to an error that looks something like ValidationError: Invalid value 1505856869.685 for timestamp The issue relates to using conda 4.3.28, and then downgrading (presumably via conda-forge) to a previous version of conda, like 4.3.27. # SOLUTION # ### To fix on macOS: # in the command below, set PATH_TO_ENVIRONMENT yourself sed -i '' -E 's|("timestamp": [0-9]+)\.|\1|' /PATH_TO_ENVIRONMENT/conda-meta/*.json ### To fix on Linux: # in the command below, set PATH_TO_ENVIRONMENT yourself sed -i -E 's|("timestamp": [0-9]+)\.|\1|' /PATH_TO_ENVIRONMENT/conda-meta/*.json ### To fix on Windows: Open notepad, and copy the contents below to `c:\fix_timestamps.py` ```python PATH_TO_ENVIRONMENT="c:\\ProgramData\\Anaconda3" # <-- fill this in yourself # backslashes must be doubled from glob import glob import json import os for path in glob(os.path.join(PATH_TO_ENVIRONMENT, 'conda-meta', '*.json')): with open(path) as fh: content = json.load(fh) if 'timestamp' in content: old_timestamp = content['timestamp'] content['timestamp'] = int(old_timestamp) if old_timestamp != content['timestamp']: with open(path, 'w') as fh: fh.write(json.dumps(content, indent=2, sort_keys=True, separators=(',', ': '))) ``` Also, change the path in the variable `PATH_TO_ENVIRONMENT` to point to the conda environment you want to fix. Then run the script with `python c:\fix_timestamps.py`. ---- EDITED: I realized this is not a pyqt issue, it's a conda issue ### steps to reproduce 1. Install 64-bit miniconda on windows 10 (version 4.3.27) 2. conda update to `4.3.28-py36h9daa44c_0` 3. conda install -c anaconda spyder 4. conda config --add channels conda-forge Now any calls to conda results in the print-out below. ### conda info This prints any time I try to use a conda command. ```conda install -c dsdale24 pyqt5 An unexpected error has occurred. 
Please consider posting the following information to the conda GitHub issue tracker at: https://github.com/conda/conda/issues Current conda install: platform : win-64 conda version : 4.3.27 conda is private : False conda-env version : 4.3.27 conda-build version : not installed python version : 3.6.2.final.0 requests version : 2.18.4 root environment : C:\Users\jrinker\Miniconda3 (writable) default environment : C:\Users\jrinker\Miniconda3 envs directories : C:\Users\jrinker\Miniconda3\envs C:\Users\jrinker\AppData\Local\conda\conda\envs C:\Users\jrinker\.conda\envs package cache : C:\Users\jrinker\Miniconda3\pkgs C:\Users\jrinker\AppData\Local\conda\conda\pkgs channel URLs : https://conda.anaconda.org/dsdale24/win-64 https://conda.anaconda.org/dsdale24/noarch https://conda.anaconda.org/conda-forge/win-64 https://conda.anaconda.org/conda-forge/noarch https://repo.continuum.io/pkgs/main/win-64 https://repo.continuum.io/pkgs/main/noarch https://repo.continuum.io/pkgs/free/win-64 https://repo.continuum.io/pkgs/free/noarch https://repo.continuum.io/pkgs/r/win-64 https://repo.continuum.io/pkgs/r/noarch https://repo.continuum.io/pkgs/pro/win-64 https://repo.continuum.io/pkgs/pro/noarch https://repo.continuum.io/pkgs/msys2/win-64 https://repo.continuum.io/pkgs/msys2/noarch config file : C:\Users\jrinker\.condarc netrc file : None offline mode : False user-agent : conda/4.3.27 requests/2.18.4 CPython/3.6.2 Windows/10 Windows/10.0.15063 administrator : False `$ C:\Users\jrinker\Miniconda3\Scripts\conda install -c dsdale24 pyqt5` Traceback (most recent call last): File "C:\Users\jrinker\Miniconda3\lib\site-packages\conda\exceptions.py", line 640, in conda_exception_handler return_value = func(*args, **kwargs) File "C:\Users\jrinker\Miniconda3\lib\site-packages\conda\cli\main.py", line 140, in _main exit_code = args.func(args, p) File "C:\Users\jrinker\Miniconda3\lib\site-packages\conda\cli\main_install.py", line 80, in execute install(args, parser, 'install') File "C:\Users\jrinker\Miniconda3\lib\site-packages\conda\cli\install.py", line 160, in install linked_dists = install_linked(prefix) File "C:\Users\jrinker\Miniconda3\lib\site-packages\conda\core\linked_data.py", line 123, in linked return set(linked_data(prefix, ignore_channels=ignore_channels).keys()) File "C:\Users\jrinker\Miniconda3\lib\site-packages\conda\core\linked_data.py", line 115, in linked_data load_linked_data(prefix, dist_name, ignore_channels=ignore_channels) File "C:\Users\jrinker\Miniconda3\lib\site-packages\conda\core\linked_data.py", line 68, in load_linked_data linked_data_[prefix][dist] = rec = IndexRecord(**rec) File "C:\Users\jrinker\Miniconda3\lib\site-packages\conda\_vendor\auxlib\entity.py", line 702, in __call__ instance = super(EntityType, cls).__call__(*args, **kwargs) File "C:\Users\jrinker\Miniconda3\lib\site-packages\conda\_vendor\auxlib\entity.py", line 719, in __init__ setattr(self, key, kwargs[key]) File "C:\Users\jrinker\Miniconda3\lib\site-packages\conda\_vendor\auxlib\entity.py", line 424, in __set__ instance.__dict__[self.name] = self.validate(instance, self.box(instance, val)) File "C:\Users\jrinker\Miniconda3\lib\site-packages\conda\_vendor\auxlib\entity.py", line 465, in validate raise ValidationError(getattr(self, 'name', 'undefined name'), val) conda._vendor.auxlib.exceptions.ValidationError: Invalid value 1505856869.685 for timestamp ``` ### things I've tried - Removing conda forge from channels - `conda update conda` (still prints out error) </issue> <code> [start of conda/models/index_record.py] 1 
# -*- coding: utf-8 -*- 2 from __future__ import absolute_import, division, print_function, unicode_literals 3 4 from functools import total_ordering 5 6 from .enums import LinkType, NoarchType, Platform 7 from .._vendor.auxlib.entity import (BooleanField, ComposableField, DictSafeMixin, Entity, 8 EnumField, Field, IntegerField, ListField, MapField, 9 StringField, NumberField) 10 from ..common.compat import string_types 11 12 13 @total_ordering 14 class Priority(object): 15 16 def __init__(self, priority): 17 self._priority = priority 18 19 def __int__(self): 20 return self._priority 21 22 def __lt__(self, other): 23 return self._priority < int(other) 24 25 def __eq__(self, other): 26 return self._priority == int(other) 27 28 def __repr__(self): 29 return "Priority(%d)" % self._priority 30 31 32 class PriorityField(Field): 33 _type = (int, Priority) 34 35 def unbox(self, instance, instance_type, val): 36 return int(val) 37 38 39 class LinkTypeField(EnumField): 40 def box(self, instance, val): 41 if isinstance(val, string_types): 42 val = val.replace('-', '').replace('_', '').lower() 43 if val == 'hard': 44 val = LinkType.hardlink 45 elif val == 'soft': 46 val = LinkType.softlink 47 return super(LinkTypeField, self).box(instance, val) 48 49 50 class NoarchField(EnumField): 51 def box(self, instance, val): 52 return super(NoarchField, self).box(instance, NoarchType.coerce(val)) 53 54 55 class TimestampField(NumberField): 56 57 def box(self, instance, val): 58 val = super(TimestampField, self).box(instance, val) 59 if val and val > 253402300799: # 9999-12-31 60 val /= 1000 # convert milliseconds to seconds; see conda/conda-build#1988 61 return val 62 63 64 class Link(DictSafeMixin, Entity): 65 source = StringField() 66 type = LinkTypeField(LinkType, required=False) 67 68 69 EMPTY_LINK = Link(source='') 70 71 # TODO: eventually stop mixing Record with LinkedPackageData 72 # class LinkedPackageRecord(DictSafeMixin, Entity): 73 # arch = EnumField(Arch, nullable=True) 74 # build = StringField() 75 # build_number = IntegerField() 76 # channel = StringField(required=False) 77 # date = StringField(required=False) 78 # depends = ListField(string_types) 79 # files = ListField(string_types, required=False) 80 # license = StringField(required=False) 81 # link = ComposableField(Link, required=False) 82 # md5 = StringField(required=False, nullable=True) 83 # name = StringField() 84 # platform = EnumField(Platform) 85 # requires = ListField(string_types, required=False) 86 # size = IntegerField(required=False) 87 # subdir = StringField(required=False) 88 # url = StringField(required=False) 89 # version = StringField() 90 91 92 class IndexRecord(DictSafeMixin, Entity): 93 _lazy_validate = True 94 95 arch = StringField(required=False, nullable=True) 96 build = StringField() 97 build_number = IntegerField() 98 date = StringField(required=False) 99 depends = ListField(string_types, required=False) 100 features = StringField(required=False) 101 has_prefix = BooleanField(required=False) 102 license = StringField(required=False) 103 license_family = StringField(required=False) 104 md5 = StringField(required=False, nullable=True) 105 name = StringField() 106 noarch = NoarchField(NoarchType, required=False, nullable=True) 107 platform = EnumField(Platform, required=False, nullable=True) 108 requires = ListField(string_types, required=False) 109 size = IntegerField(required=False) 110 subdir = StringField(required=False) 111 timestamp = TimestampField(required=False) 112 track_features = StringField(required=False) 
113 version = StringField() 114 115 fn = StringField(required=False, nullable=True) 116 schannel = StringField(required=False, nullable=True) 117 channel = StringField(required=False, nullable=True) 118 priority = PriorityField(required=False) 119 url = StringField(required=False, nullable=True) 120 auth = StringField(required=False, nullable=True) 121 122 files = ListField(string_types, default=(), required=False) 123 link = ComposableField(Link, required=False) 124 125 with_features_depends = MapField(required=False) 126 preferred_env = StringField(default=None, required=False, nullable=True) 127 [end of conda/models/index_record.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conda/models/index_record.py b/conda/models/index_record.py --- a/conda/models/index_record.py +++ b/conda/models/index_record.py @@ -54,12 +54,33 @@ class TimestampField(NumberField): - def box(self, instance, val): - val = super(TimestampField, self).box(instance, val) - if val and val > 253402300799: # 9999-12-31 - val /= 1000 # convert milliseconds to seconds; see conda/conda-build#1988 + # @staticmethod + # def _make_seconds(val): + # if val: + # val = int(val) + # if val > 253402300799: # 9999-12-31 + # val //= 1000 # convert milliseconds to seconds; see conda/conda-build#1988 + # return val + + @staticmethod + def _make_milliseconds(val): + if val: + if val < 253402300799: # 9999-12-31 + val *= 1000 # convert seconds to milliseconds + val = int(val) return val + def box(self, instance, val): + return self._make_milliseconds(super(TimestampField, self).box(instance, val)) + + def unbox(self, instance, instance_type, val): + return self._make_milliseconds( + super(TimestampField, self).unbox(instance, instance_type, val) + ) + + def dump(self, val): + return self._make_milliseconds(super(TimestampField, self).dump(val)) + class Link(DictSafeMixin, Entity): source = StringField()
{"golden_diff": "diff --git a/conda/models/index_record.py b/conda/models/index_record.py\n--- a/conda/models/index_record.py\n+++ b/conda/models/index_record.py\n@@ -54,12 +54,33 @@\n \n class TimestampField(NumberField):\n \n- def box(self, instance, val):\n- val = super(TimestampField, self).box(instance, val)\n- if val and val > 253402300799: # 9999-12-31\n- val /= 1000 # convert milliseconds to seconds; see conda/conda-build#1988\n+ # @staticmethod\n+ # def _make_seconds(val):\n+ # if val:\n+ # val = int(val)\n+ # if val > 253402300799: # 9999-12-31\n+ # val //= 1000 # convert milliseconds to seconds; see conda/conda-build#1988\n+ # return val\n+\n+ @staticmethod\n+ def _make_milliseconds(val):\n+ if val:\n+ if val < 253402300799: # 9999-12-31\n+ val *= 1000 # convert seconds to milliseconds\n+ val = int(val)\n return val\n \n+ def box(self, instance, val):\n+ return self._make_milliseconds(super(TimestampField, self).box(instance, val))\n+\n+ def unbox(self, instance, instance_type, val):\n+ return self._make_milliseconds(\n+ super(TimestampField, self).unbox(instance, instance_type, val)\n+ )\n+\n+ def dump(self, val):\n+ return self._make_milliseconds(super(TimestampField, self).dump(val))\n+\n \n class Link(DictSafeMixin, Entity):\n source = StringField()\n", "issue": "ValidationError: invalid value for timestamp\nMODERATOR EDIT: This issue relates to an error that looks something like\r\n\r\n ValidationError: Invalid value 1505856869.685 for timestamp\r\n\r\nThe issue relates to using conda 4.3.28, and then downgrading (presumably via conda-forge) to a previous version of conda, like 4.3.27.\r\n\r\n# SOLUTION #\r\n\r\n### To fix on macOS:\r\n\r\n # in the command below, set PATH_TO_ENVIRONMENT yourself\r\n sed -i '' -E 's|(\"timestamp\": [0-9]+)\\.|\\1|' /PATH_TO_ENVIRONMENT/conda-meta/*.json\r\n\r\n### To fix on Linux:\r\n\r\n # in the command below, set PATH_TO_ENVIRONMENT yourself\r\n sed -i -E 's|(\"timestamp\": [0-9]+)\\.|\\1|' /PATH_TO_ENVIRONMENT/conda-meta/*.json\r\n\r\n### To fix on Windows:\r\n\r\nOpen notepad, and copy the contents below to `c:\\fix_timestamps.py`\r\n\r\n```python\r\nPATH_TO_ENVIRONMENT=\"c:\\\\ProgramData\\\\Anaconda3\" # <-- fill this in yourself\r\n # backslashes must be doubled\r\n\r\nfrom glob import glob\r\nimport json\r\nimport os\r\nfor path in glob(os.path.join(PATH_TO_ENVIRONMENT, 'conda-meta', '*.json')):\r\n with open(path) as fh:\r\n content = json.load(fh)\r\n if 'timestamp' in content:\r\n old_timestamp = content['timestamp']\r\n content['timestamp'] = int(old_timestamp)\r\n if old_timestamp != content['timestamp']:\r\n with open(path, 'w') as fh:\r\n fh.write(json.dumps(content, indent=2, sort_keys=True, separators=(',', ': ')))\r\n```\r\n\r\nAlso, change the path in the variable `PATH_TO_ENVIRONMENT` to point to the conda environment you want to fix. Then run the script with `python c:\\fix_timestamps.py`.\r\n\r\n----\r\n\r\nEDITED: I realized this is not a pyqt issue, it's a conda issue\r\n\r\n### steps to reproduce\r\n\r\n1. Install 64-bit miniconda on windows 10 (version 4.3.27) \r\n2. conda update to `4.3.28-py36h9daa44c_0` \r\n3. conda install -c anaconda spyder \r\n4. 
conda config --add channels conda-forge \r\n\r\nNow any calls to conda results in the print-out below.\r\n\r\n### conda info\r\n\r\nThis prints any time I try to use a conda command.\r\n\r\n```conda install -c dsdale24 pyqt5\r\nAn unexpected error has occurred.\r\nPlease consider posting the following information to the\r\nconda GitHub issue tracker at:\r\n\r\n https://github.com/conda/conda/issues\r\n\r\n\r\n\r\nCurrent conda install:\r\n\r\n platform : win-64\r\n conda version : 4.3.27\r\n conda is private : False\r\n conda-env version : 4.3.27\r\n conda-build version : not installed\r\n python version : 3.6.2.final.0\r\n requests version : 2.18.4\r\n root environment : C:\\Users\\jrinker\\Miniconda3 (writable)\r\n default environment : C:\\Users\\jrinker\\Miniconda3\r\n envs directories : C:\\Users\\jrinker\\Miniconda3\\envs\r\n C:\\Users\\jrinker\\AppData\\Local\\conda\\conda\\envs\r\n C:\\Users\\jrinker\\.conda\\envs\r\n package cache : C:\\Users\\jrinker\\Miniconda3\\pkgs\r\n C:\\Users\\jrinker\\AppData\\Local\\conda\\conda\\pkgs\r\n channel URLs : https://conda.anaconda.org/dsdale24/win-64\r\n https://conda.anaconda.org/dsdale24/noarch\r\n https://conda.anaconda.org/conda-forge/win-64\r\n https://conda.anaconda.org/conda-forge/noarch\r\n https://repo.continuum.io/pkgs/main/win-64\r\n https://repo.continuum.io/pkgs/main/noarch\r\n https://repo.continuum.io/pkgs/free/win-64\r\n https://repo.continuum.io/pkgs/free/noarch\r\n https://repo.continuum.io/pkgs/r/win-64\r\n https://repo.continuum.io/pkgs/r/noarch\r\n https://repo.continuum.io/pkgs/pro/win-64\r\n https://repo.continuum.io/pkgs/pro/noarch\r\n https://repo.continuum.io/pkgs/msys2/win-64\r\n https://repo.continuum.io/pkgs/msys2/noarch\r\n config file : C:\\Users\\jrinker\\.condarc\r\n netrc file : None\r\n offline mode : False\r\n user-agent : conda/4.3.27 requests/2.18.4 CPython/3.6.2 Windows/10 Windows/10.0.15063\r\n administrator : False\r\n\r\n`$ C:\\Users\\jrinker\\Miniconda3\\Scripts\\conda install -c dsdale24 pyqt5`\r\n\r\n\r\n\r\n\r\n Traceback (most recent call last):\r\n File \"C:\\Users\\jrinker\\Miniconda3\\lib\\site-packages\\conda\\exceptions.py\", line 640, in conda_exception_handler\r\n return_value = func(*args, **kwargs)\r\n File \"C:\\Users\\jrinker\\Miniconda3\\lib\\site-packages\\conda\\cli\\main.py\", line 140, in _main\r\n exit_code = args.func(args, p)\r\n File \"C:\\Users\\jrinker\\Miniconda3\\lib\\site-packages\\conda\\cli\\main_install.py\", line 80, in execute\r\n install(args, parser, 'install')\r\n File \"C:\\Users\\jrinker\\Miniconda3\\lib\\site-packages\\conda\\cli\\install.py\", line 160, in install\r\n linked_dists = install_linked(prefix)\r\n File \"C:\\Users\\jrinker\\Miniconda3\\lib\\site-packages\\conda\\core\\linked_data.py\", line 123, in linked\r\n return set(linked_data(prefix, ignore_channels=ignore_channels).keys())\r\n File \"C:\\Users\\jrinker\\Miniconda3\\lib\\site-packages\\conda\\core\\linked_data.py\", line 115, in linked_data\r\n load_linked_data(prefix, dist_name, ignore_channels=ignore_channels)\r\n File \"C:\\Users\\jrinker\\Miniconda3\\lib\\site-packages\\conda\\core\\linked_data.py\", line 68, in load_linked_data\r\n linked_data_[prefix][dist] = rec = IndexRecord(**rec)\r\n File \"C:\\Users\\jrinker\\Miniconda3\\lib\\site-packages\\conda\\_vendor\\auxlib\\entity.py\", line 702, in __call__\r\n instance = super(EntityType, cls).__call__(*args, **kwargs)\r\n File \"C:\\Users\\jrinker\\Miniconda3\\lib\\site-packages\\conda\\_vendor\\auxlib\\entity.py\", line 719, in 
__init__\r\n setattr(self, key, kwargs[key])\r\n File \"C:\\Users\\jrinker\\Miniconda3\\lib\\site-packages\\conda\\_vendor\\auxlib\\entity.py\", line 424, in __set__\r\n instance.__dict__[self.name] = self.validate(instance, self.box(instance, val))\r\n File \"C:\\Users\\jrinker\\Miniconda3\\lib\\site-packages\\conda\\_vendor\\auxlib\\entity.py\", line 465, in validate\r\n raise ValidationError(getattr(self, 'name', 'undefined name'), val)\r\n conda._vendor.auxlib.exceptions.ValidationError: Invalid value 1505856869.685 for timestamp\r\n``` \r\n\r\n### things I've tried\r\n\r\n- Removing conda forge from channels\r\n- `conda update conda` (still prints out error)\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom functools import total_ordering\n\nfrom .enums import LinkType, NoarchType, Platform\nfrom .._vendor.auxlib.entity import (BooleanField, ComposableField, DictSafeMixin, Entity,\n EnumField, Field, IntegerField, ListField, MapField,\n StringField, NumberField)\nfrom ..common.compat import string_types\n\n\n@total_ordering\nclass Priority(object):\n\n def __init__(self, priority):\n self._priority = priority\n\n def __int__(self):\n return self._priority\n\n def __lt__(self, other):\n return self._priority < int(other)\n\n def __eq__(self, other):\n return self._priority == int(other)\n\n def __repr__(self):\n return \"Priority(%d)\" % self._priority\n\n\nclass PriorityField(Field):\n _type = (int, Priority)\n\n def unbox(self, instance, instance_type, val):\n return int(val)\n\n\nclass LinkTypeField(EnumField):\n def box(self, instance, val):\n if isinstance(val, string_types):\n val = val.replace('-', '').replace('_', '').lower()\n if val == 'hard':\n val = LinkType.hardlink\n elif val == 'soft':\n val = LinkType.softlink\n return super(LinkTypeField, self).box(instance, val)\n\n\nclass NoarchField(EnumField):\n def box(self, instance, val):\n return super(NoarchField, self).box(instance, NoarchType.coerce(val))\n\n\nclass TimestampField(NumberField):\n\n def box(self, instance, val):\n val = super(TimestampField, self).box(instance, val)\n if val and val > 253402300799: # 9999-12-31\n val /= 1000 # convert milliseconds to seconds; see conda/conda-build#1988\n return val\n\n\nclass Link(DictSafeMixin, Entity):\n source = StringField()\n type = LinkTypeField(LinkType, required=False)\n\n\nEMPTY_LINK = Link(source='')\n\n# TODO: eventually stop mixing Record with LinkedPackageData\n# class LinkedPackageRecord(DictSafeMixin, Entity):\n# arch = EnumField(Arch, nullable=True)\n# build = StringField()\n# build_number = IntegerField()\n# channel = StringField(required=False)\n# date = StringField(required=False)\n# depends = ListField(string_types)\n# files = ListField(string_types, required=False)\n# license = StringField(required=False)\n# link = ComposableField(Link, required=False)\n# md5 = StringField(required=False, nullable=True)\n# name = StringField()\n# platform = EnumField(Platform)\n# requires = ListField(string_types, required=False)\n# size = IntegerField(required=False)\n# subdir = StringField(required=False)\n# url = StringField(required=False)\n# version = StringField()\n\n\nclass IndexRecord(DictSafeMixin, Entity):\n _lazy_validate = True\n\n arch = StringField(required=False, nullable=True)\n build = StringField()\n build_number = IntegerField()\n date = StringField(required=False)\n depends = ListField(string_types, required=False)\n features = 
StringField(required=False)\n has_prefix = BooleanField(required=False)\n license = StringField(required=False)\n license_family = StringField(required=False)\n md5 = StringField(required=False, nullable=True)\n name = StringField()\n noarch = NoarchField(NoarchType, required=False, nullable=True)\n platform = EnumField(Platform, required=False, nullable=True)\n requires = ListField(string_types, required=False)\n size = IntegerField(required=False)\n subdir = StringField(required=False)\n timestamp = TimestampField(required=False)\n track_features = StringField(required=False)\n version = StringField()\n\n fn = StringField(required=False, nullable=True)\n schannel = StringField(required=False, nullable=True)\n channel = StringField(required=False, nullable=True)\n priority = PriorityField(required=False)\n url = StringField(required=False, nullable=True)\n auth = StringField(required=False, nullable=True)\n\n files = ListField(string_types, default=(), required=False)\n link = ComposableField(Link, required=False)\n\n with_features_depends = MapField(required=False)\n preferred_env = StringField(default=None, required=False, nullable=True)\n", "path": "conda/models/index_record.py"}]}
3,573
448
gh_patches_debug_39850
rasdani/github-patches
git_diff
mozilla__pontoon-3075
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Store pretranslations for each locale separately We currently retrieve pretranslations for each locale separately, and store them all at once with a single DB query (for all locales). That makes our DB performance a little bit better, but risks the need for additional retrieval of already retrieved pretranslations in case of an error. Since retrieving pretranslations is significantly slower than writing them to the DB, we should write to DB for each locale separately. </issue> <code> [start of pontoon/pretranslation/tasks.py] 1 import logging 2 3 from django.db.models import Q, CharField, Value as V 4 from django.db.models.functions import Concat 5 from django.conf import settings 6 from pontoon.base.models import ( 7 Project, 8 Entity, 9 TranslatedResource, 10 Translation, 11 User, 12 ) 13 from pontoon.actionlog.models import ActionLog 14 from pontoon.pretranslation import AUTHORS 15 from pontoon.pretranslation.pretranslate import ( 16 get_pretranslations, 17 update_changed_instances, 18 ) 19 from pontoon.base.tasks import PontoonTask 20 from pontoon.sync.core import serial_task 21 from pontoon.checks.libraries import run_checks 22 from pontoon.checks.utils import bulk_run_checks 23 24 25 log = logging.getLogger(__name__) 26 27 28 @serial_task(settings.SYNC_TASK_TIMEOUT, base=PontoonTask, lock_key="project={0}") 29 def pretranslate(self, project_pk, locales=None, entities=None): 30 """ 31 Identifies strings without any translations and any suggestions. 32 Engages TheAlgorithm (bug 1552796) to gather pretranslations. 33 Stores pretranslations as suggestions (approved=False) to DB. 34 35 :arg project_pk: the pk of the project to be pretranslated 36 :arg Queryset locales: the locales for the project to be pretranslated 37 :arg Queryset entites: the entities for the project to be pretranslated 38 39 :returns: None 40 """ 41 project = Project.objects.get(pk=project_pk) 42 43 if not project.pretranslation_enabled: 44 log.info(f"Pretranslation not enabled for project {project.name}") 45 return 46 47 if locales: 48 locales = project.locales.filter(pk__in=locales) 49 else: 50 locales = project.locales 51 52 locales = locales.filter( 53 project_locale__project=project, 54 project_locale__pretranslation_enabled=True, 55 project_locale__readonly=False, 56 ) 57 58 if not locales: 59 log.info( 60 f"Pretranslation not enabled for any locale within project {project.name}" 61 ) 62 return 63 64 log.info(f"Fetching pretranslations for project {project.name} started") 65 66 if not entities: 67 entities = Entity.objects.filter( 68 resource__project=project, 69 obsolete=False, 70 ) 71 72 entities = entities.prefetch_related("resource") 73 74 # Fetch all available locale-resource pairs (TranslatedResource objects) 75 tr_pairs = ( 76 TranslatedResource.objects.filter( 77 resource__project=project, 78 locale__in=locales, 79 ) 80 .annotate( 81 locale_resource=Concat( 82 "locale_id", V("-"), "resource_id", output_field=CharField() 83 ) 84 ) 85 .values_list("locale_resource", flat=True) 86 .distinct() 87 ) 88 89 # Fetch all locale-entity pairs with non-rejected or pretranslated translations 90 pt_authors = [User.objects.get(email=email) for email in AUTHORS.values()] 91 translated_entities = ( 92 Translation.objects.filter( 93 locale__in=locales, 94 entity__in=entities, 95 ) 96 .filter(Q(rejected=False) | Q(user__in=pt_authors)) 97 .annotate( 98 locale_entity=Concat( 99 "locale_id", V("-"), "entity_id", 
output_field=CharField() 100 ) 101 ) 102 .values_list("locale_entity", flat=True) 103 .distinct() 104 ) 105 106 translated_entities = list(translated_entities) 107 108 translations = [] 109 110 # To keep track of changed TranslatedResources and their latest_translation 111 tr_dict = {} 112 113 tr_filter = [] 114 index = -1 115 116 for locale in locales: 117 log.info(f"Fetching pretranslations for locale {locale.code} started") 118 for entity in entities: 119 locale_entity = f"{locale.id}-{entity.id}" 120 locale_resource = f"{locale.id}-{entity.resource.id}" 121 if locale_entity in translated_entities or locale_resource not in tr_pairs: 122 continue 123 124 pretranslations = get_pretranslations(entity, locale) 125 126 if not pretranslations: 127 continue 128 129 failed_checks = run_checks( 130 entity, 131 locale.code, 132 entity.string, 133 pretranslations[0][0], 134 use_tt_checks=False, 135 ) 136 137 if failed_checks: 138 pretranslations = get_pretranslations( 139 entity, locale, preserve_placeables=True 140 ) 141 142 for string, plural_form, user in pretranslations: 143 t = Translation( 144 entity=entity, 145 locale=locale, 146 string=string, 147 user=user, 148 approved=False, 149 pretranslated=True, 150 active=True, 151 plural_form=plural_form, 152 ) 153 154 index += 1 155 translations.append(t) 156 157 if locale_resource not in tr_dict: 158 tr_dict[locale_resource] = index 159 160 # Add query for fetching respective TranslatedResource. 161 tr_filter.append( 162 Q(locale__id=locale.id) & Q(resource__id=entity.resource.id) 163 ) 164 165 # Update the latest translation index 166 tr_dict[locale_resource] = index 167 168 log.info(f"Fetching pretranslations for locale {locale.code} done") 169 170 if len(translations) == 0: 171 return 172 173 translations = Translation.objects.bulk_create(translations) 174 175 # Log creating actions 176 actions_to_log = [ 177 ActionLog( 178 action_type=ActionLog.ActionType.TRANSLATION_CREATED, 179 performed_by=t.user, 180 translation=t, 181 ) 182 for t in translations 183 ] 184 185 ActionLog.objects.bulk_create(actions_to_log) 186 187 # Run checks on all translations 188 translation_pks = {translation.pk for translation in translations} 189 bulk_run_checks(Translation.objects.for_checks().filter(pk__in=translation_pks)) 190 191 # Mark translations as changed 192 changed_translations = Translation.objects.filter( 193 pk__in=translation_pks, 194 # Do not sync translations with errors and warnings 195 errors__isnull=True, 196 warnings__isnull=True, 197 ) 198 changed_translations.bulk_mark_changed() 199 200 # Update latest activity and stats for changed instances. 201 update_changed_instances(tr_filter, tr_dict, translations) 202 203 log.info(f"Fetching pretranslations for project {project.name} done") 204 [end of pontoon/pretranslation/tasks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pontoon/pretranslation/tasks.py b/pontoon/pretranslation/tasks.py --- a/pontoon/pretranslation/tasks.py +++ b/pontoon/pretranslation/tasks.py @@ -105,16 +105,16 @@ translated_entities = list(translated_entities) - translations = [] + for locale in locales: + log.info(f"Fetching pretranslations for locale {locale.code} started") - # To keep track of changed TranslatedResources and their latest_translation - tr_dict = {} + translations = [] - tr_filter = [] - index = -1 + # To keep track of changed TranslatedResources and their latest_translation + tr_dict = {} + tr_filter = [] + index = -1 - for locale in locales: - log.info(f"Fetching pretranslations for locale {locale.code} started") for entity in entities: locale_entity = f"{locale.id}-{entity.id}" locale_resource = f"{locale.id}-{entity.resource.id}" @@ -165,39 +165,42 @@ # Update the latest translation index tr_dict[locale_resource] = index - log.info(f"Fetching pretranslations for locale {locale.code} done") + if len(translations) == 0: + log.info( + f"Fetching pretranslations for locale {locale.code} done: No pretranslation fetched" + ) + continue - if len(translations) == 0: - return + translations = Translation.objects.bulk_create(translations) - translations = Translation.objects.bulk_create(translations) + # Log creating actions + actions_to_log = [ + ActionLog( + action_type=ActionLog.ActionType.TRANSLATION_CREATED, + performed_by=t.user, + translation=t, + ) + for t in translations + ] - # Log creating actions - actions_to_log = [ - ActionLog( - action_type=ActionLog.ActionType.TRANSLATION_CREATED, - performed_by=t.user, - translation=t, - ) - for t in translations - ] + ActionLog.objects.bulk_create(actions_to_log) - ActionLog.objects.bulk_create(actions_to_log) + # Run checks on all translations + translation_pks = {translation.pk for translation in translations} + bulk_run_checks(Translation.objects.for_checks().filter(pk__in=translation_pks)) - # Run checks on all translations - translation_pks = {translation.pk for translation in translations} - bulk_run_checks(Translation.objects.for_checks().filter(pk__in=translation_pks)) + # Mark translations as changed + changed_translations = Translation.objects.filter( + pk__in=translation_pks, + # Do not sync translations with errors and warnings + errors__isnull=True, + warnings__isnull=True, + ) + changed_translations.bulk_mark_changed() - # Mark translations as changed - changed_translations = Translation.objects.filter( - pk__in=translation_pks, - # Do not sync translations with errors and warnings - errors__isnull=True, - warnings__isnull=True, - ) - changed_translations.bulk_mark_changed() + # Update latest activity and stats for changed instances. + update_changed_instances(tr_filter, tr_dict, translations) - # Update latest activity and stats for changed instances. - update_changed_instances(tr_filter, tr_dict, translations) + log.info(f"Fetching pretranslations for locale {locale.code} done") log.info(f"Fetching pretranslations for project {project.name} done")
{"golden_diff": "diff --git a/pontoon/pretranslation/tasks.py b/pontoon/pretranslation/tasks.py\n--- a/pontoon/pretranslation/tasks.py\n+++ b/pontoon/pretranslation/tasks.py\n@@ -105,16 +105,16 @@\n \n translated_entities = list(translated_entities)\n \n- translations = []\n+ for locale in locales:\n+ log.info(f\"Fetching pretranslations for locale {locale.code} started\")\n \n- # To keep track of changed TranslatedResources and their latest_translation\n- tr_dict = {}\n+ translations = []\n \n- tr_filter = []\n- index = -1\n+ # To keep track of changed TranslatedResources and their latest_translation\n+ tr_dict = {}\n+ tr_filter = []\n+ index = -1\n \n- for locale in locales:\n- log.info(f\"Fetching pretranslations for locale {locale.code} started\")\n for entity in entities:\n locale_entity = f\"{locale.id}-{entity.id}\"\n locale_resource = f\"{locale.id}-{entity.resource.id}\"\n@@ -165,39 +165,42 @@\n # Update the latest translation index\n tr_dict[locale_resource] = index\n \n- log.info(f\"Fetching pretranslations for locale {locale.code} done\")\n+ if len(translations) == 0:\n+ log.info(\n+ f\"Fetching pretranslations for locale {locale.code} done: No pretranslation fetched\"\n+ )\n+ continue\n \n- if len(translations) == 0:\n- return\n+ translations = Translation.objects.bulk_create(translations)\n \n- translations = Translation.objects.bulk_create(translations)\n+ # Log creating actions\n+ actions_to_log = [\n+ ActionLog(\n+ action_type=ActionLog.ActionType.TRANSLATION_CREATED,\n+ performed_by=t.user,\n+ translation=t,\n+ )\n+ for t in translations\n+ ]\n \n- # Log creating actions\n- actions_to_log = [\n- ActionLog(\n- action_type=ActionLog.ActionType.TRANSLATION_CREATED,\n- performed_by=t.user,\n- translation=t,\n- )\n- for t in translations\n- ]\n+ ActionLog.objects.bulk_create(actions_to_log)\n \n- ActionLog.objects.bulk_create(actions_to_log)\n+ # Run checks on all translations\n+ translation_pks = {translation.pk for translation in translations}\n+ bulk_run_checks(Translation.objects.for_checks().filter(pk__in=translation_pks))\n \n- # Run checks on all translations\n- translation_pks = {translation.pk for translation in translations}\n- bulk_run_checks(Translation.objects.for_checks().filter(pk__in=translation_pks))\n+ # Mark translations as changed\n+ changed_translations = Translation.objects.filter(\n+ pk__in=translation_pks,\n+ # Do not sync translations with errors and warnings\n+ errors__isnull=True,\n+ warnings__isnull=True,\n+ )\n+ changed_translations.bulk_mark_changed()\n \n- # Mark translations as changed\n- changed_translations = Translation.objects.filter(\n- pk__in=translation_pks,\n- # Do not sync translations with errors and warnings\n- errors__isnull=True,\n- warnings__isnull=True,\n- )\n- changed_translations.bulk_mark_changed()\n+ # Update latest activity and stats for changed instances.\n+ update_changed_instances(tr_filter, tr_dict, translations)\n \n- # Update latest activity and stats for changed instances.\n- update_changed_instances(tr_filter, tr_dict, translations)\n+ log.info(f\"Fetching pretranslations for locale {locale.code} done\")\n \n log.info(f\"Fetching pretranslations for project {project.name} done\")\n", "issue": "Store pretranslations for each locale separately\nWe currently retrieve pretranslations for each locale separately, and store them all at once with a single DB query (for all locales). 
That makes our DB performance a little bit better, but risks the need for additional retrieval of already retrieved pretranslations in case of an error.\r\n\r\nSince retrieving pretranslations is significantly slower than writing them to the DB, we should write to DB for each locale separately.\n", "before_files": [{"content": "import logging\n\nfrom django.db.models import Q, CharField, Value as V\nfrom django.db.models.functions import Concat\nfrom django.conf import settings\nfrom pontoon.base.models import (\n Project,\n Entity,\n TranslatedResource,\n Translation,\n User,\n)\nfrom pontoon.actionlog.models import ActionLog\nfrom pontoon.pretranslation import AUTHORS\nfrom pontoon.pretranslation.pretranslate import (\n get_pretranslations,\n update_changed_instances,\n)\nfrom pontoon.base.tasks import PontoonTask\nfrom pontoon.sync.core import serial_task\nfrom pontoon.checks.libraries import run_checks\nfrom pontoon.checks.utils import bulk_run_checks\n\n\nlog = logging.getLogger(__name__)\n\n\n@serial_task(settings.SYNC_TASK_TIMEOUT, base=PontoonTask, lock_key=\"project={0}\")\ndef pretranslate(self, project_pk, locales=None, entities=None):\n \"\"\"\n Identifies strings without any translations and any suggestions.\n Engages TheAlgorithm (bug 1552796) to gather pretranslations.\n Stores pretranslations as suggestions (approved=False) to DB.\n\n :arg project_pk: the pk of the project to be pretranslated\n :arg Queryset locales: the locales for the project to be pretranslated\n :arg Queryset entites: the entities for the project to be pretranslated\n\n :returns: None\n \"\"\"\n project = Project.objects.get(pk=project_pk)\n\n if not project.pretranslation_enabled:\n log.info(f\"Pretranslation not enabled for project {project.name}\")\n return\n\n if locales:\n locales = project.locales.filter(pk__in=locales)\n else:\n locales = project.locales\n\n locales = locales.filter(\n project_locale__project=project,\n project_locale__pretranslation_enabled=True,\n project_locale__readonly=False,\n )\n\n if not locales:\n log.info(\n f\"Pretranslation not enabled for any locale within project {project.name}\"\n )\n return\n\n log.info(f\"Fetching pretranslations for project {project.name} started\")\n\n if not entities:\n entities = Entity.objects.filter(\n resource__project=project,\n obsolete=False,\n )\n\n entities = entities.prefetch_related(\"resource\")\n\n # Fetch all available locale-resource pairs (TranslatedResource objects)\n tr_pairs = (\n TranslatedResource.objects.filter(\n resource__project=project,\n locale__in=locales,\n )\n .annotate(\n locale_resource=Concat(\n \"locale_id\", V(\"-\"), \"resource_id\", output_field=CharField()\n )\n )\n .values_list(\"locale_resource\", flat=True)\n .distinct()\n )\n\n # Fetch all locale-entity pairs with non-rejected or pretranslated translations\n pt_authors = [User.objects.get(email=email) for email in AUTHORS.values()]\n translated_entities = (\n Translation.objects.filter(\n locale__in=locales,\n entity__in=entities,\n )\n .filter(Q(rejected=False) | Q(user__in=pt_authors))\n .annotate(\n locale_entity=Concat(\n \"locale_id\", V(\"-\"), \"entity_id\", output_field=CharField()\n )\n )\n .values_list(\"locale_entity\", flat=True)\n .distinct()\n )\n\n translated_entities = list(translated_entities)\n\n translations = []\n\n # To keep track of changed TranslatedResources and their latest_translation\n tr_dict = {}\n\n tr_filter = []\n index = -1\n\n for locale in locales:\n log.info(f\"Fetching pretranslations for locale {locale.code} 
started\")\n for entity in entities:\n locale_entity = f\"{locale.id}-{entity.id}\"\n locale_resource = f\"{locale.id}-{entity.resource.id}\"\n if locale_entity in translated_entities or locale_resource not in tr_pairs:\n continue\n\n pretranslations = get_pretranslations(entity, locale)\n\n if not pretranslations:\n continue\n\n failed_checks = run_checks(\n entity,\n locale.code,\n entity.string,\n pretranslations[0][0],\n use_tt_checks=False,\n )\n\n if failed_checks:\n pretranslations = get_pretranslations(\n entity, locale, preserve_placeables=True\n )\n\n for string, plural_form, user in pretranslations:\n t = Translation(\n entity=entity,\n locale=locale,\n string=string,\n user=user,\n approved=False,\n pretranslated=True,\n active=True,\n plural_form=plural_form,\n )\n\n index += 1\n translations.append(t)\n\n if locale_resource not in tr_dict:\n tr_dict[locale_resource] = index\n\n # Add query for fetching respective TranslatedResource.\n tr_filter.append(\n Q(locale__id=locale.id) & Q(resource__id=entity.resource.id)\n )\n\n # Update the latest translation index\n tr_dict[locale_resource] = index\n\n log.info(f\"Fetching pretranslations for locale {locale.code} done\")\n\n if len(translations) == 0:\n return\n\n translations = Translation.objects.bulk_create(translations)\n\n # Log creating actions\n actions_to_log = [\n ActionLog(\n action_type=ActionLog.ActionType.TRANSLATION_CREATED,\n performed_by=t.user,\n translation=t,\n )\n for t in translations\n ]\n\n ActionLog.objects.bulk_create(actions_to_log)\n\n # Run checks on all translations\n translation_pks = {translation.pk for translation in translations}\n bulk_run_checks(Translation.objects.for_checks().filter(pk__in=translation_pks))\n\n # Mark translations as changed\n changed_translations = Translation.objects.filter(\n pk__in=translation_pks,\n # Do not sync translations with errors and warnings\n errors__isnull=True,\n warnings__isnull=True,\n )\n changed_translations.bulk_mark_changed()\n\n # Update latest activity and stats for changed instances.\n update_changed_instances(tr_filter, tr_dict, translations)\n\n log.info(f\"Fetching pretranslations for project {project.name} done\")\n", "path": "pontoon/pretranslation/tasks.py"}]}
2,418
809
gh_patches_debug_6863
rasdani/github-patches
git_diff
prowler-cloud__prowler-2709
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> FileNotFoundError after version 3.8.1 ### Discussed in https://github.com/prowler-cloud/prowler/discussions/2707 <div type='discussions-op-text'> <sup>Originally posted by **cerontrustly** August 10, 2023</sup> Hello guys! After using version 3.8.1 Prowler stopped working for me showing the following error: `Traceback (most recent call last): File "/home/prowler/.local/bin/prowler", line 8, in <module> sys.exit(prowler()) File "/home/prowler/.local/lib/python3.9/site-packages/prowler/__main__.py", line 222, in prowler resolve_security_hub_previous_findings( File "/home/prowler/.local/lib/python3.9/site-packages/prowler/providers/aws/lib/security_hub/security_hub.py", line 66, in resolve_security_hub_previous_findings with open(f"{output_directory}/{output_filename}{json_asff_file_suffix}") as f: FileNotFoundError: [Errno 2] No such file or directory: '/home/prowler/output/None.asff.json'` My command line looks like this: `docker run -v /tmp:/prowler/tmp toniblyx/prowler:stable -f us-west-2 -M json-asff -S -z` I think it can be related to [this](https://github.com/prowler-cloud/prowler/pull/2687) change. Can somebody you help me? Thank you!</div> </issue> <code> [start of prowler/__main__.py] 1 #!/usr/bin/env python3 2 # -*- coding: utf-8 -*- 3 4 import os 5 import sys 6 7 from prowler.lib.banner import print_banner 8 from prowler.lib.check.check import ( 9 bulk_load_checks_metadata, 10 bulk_load_compliance_frameworks, 11 exclude_checks_to_run, 12 exclude_services_to_run, 13 execute_checks, 14 list_categories, 15 list_services, 16 parse_checks_from_folder, 17 print_categories, 18 print_checks, 19 print_compliance_frameworks, 20 print_compliance_requirements, 21 print_services, 22 remove_custom_checks_module, 23 ) 24 from prowler.lib.check.checks_loader import load_checks_to_execute 25 from prowler.lib.check.compliance import update_checks_metadata_with_compliance 26 from prowler.lib.cli.parser import ProwlerArgumentParser 27 from prowler.lib.logger import logger, set_logging_config 28 from prowler.lib.outputs.compliance import display_compliance_table 29 from prowler.lib.outputs.html import add_html_footer, fill_html_overview_statistics 30 from prowler.lib.outputs.json import close_json 31 from prowler.lib.outputs.outputs import extract_findings_statistics, send_to_s3_bucket 32 from prowler.lib.outputs.slack import send_slack_message 33 from prowler.lib.outputs.summary_table import display_summary_table 34 from prowler.providers.aws.lib.security_hub.security_hub import ( 35 resolve_security_hub_previous_findings, 36 ) 37 from prowler.providers.common.allowlist import set_provider_allowlist 38 from prowler.providers.common.audit_info import ( 39 set_provider_audit_info, 40 set_provider_execution_parameters, 41 ) 42 from prowler.providers.common.outputs import set_provider_output_options 43 from prowler.providers.common.quick_inventory import run_provider_quick_inventory 44 45 46 def prowler(): 47 # Parse Arguments 48 parser = ProwlerArgumentParser() 49 args = parser.parse() 50 51 # Save Arguments 52 provider = args.provider 53 checks = args.checks 54 excluded_checks = args.excluded_checks 55 excluded_services = args.excluded_services 56 services = args.services 57 categories = args.categories 58 checks_file = args.checks_file 59 checks_folder = args.checks_folder 60 severities = args.severity 61 compliance_framework = args.compliance 62 63 if not args.no_banner: 64 print_banner(args) 
65 66 # We treat the compliance framework as another output format 67 if compliance_framework: 68 args.output_modes.extend(compliance_framework) 69 70 # Set Logger configuration 71 set_logging_config(args.log_level, args.log_file, args.only_logs) 72 73 if args.list_services: 74 print_services(list_services(provider)) 75 sys.exit() 76 77 # Load checks metadata 78 logger.debug("Loading checks metadata from .metadata.json files") 79 bulk_checks_metadata = bulk_load_checks_metadata(provider) 80 81 if args.list_categories: 82 print_categories(list_categories(bulk_checks_metadata)) 83 sys.exit() 84 85 bulk_compliance_frameworks = {} 86 # Load compliance frameworks 87 logger.debug("Loading compliance frameworks from .json files") 88 89 bulk_compliance_frameworks = bulk_load_compliance_frameworks(provider) 90 # Complete checks metadata with the compliance framework specification 91 update_checks_metadata_with_compliance( 92 bulk_compliance_frameworks, bulk_checks_metadata 93 ) 94 if args.list_compliance: 95 print_compliance_frameworks(bulk_compliance_frameworks) 96 sys.exit() 97 if args.list_compliance_requirements: 98 print_compliance_requirements( 99 bulk_compliance_frameworks, args.list_compliance_requirements 100 ) 101 sys.exit() 102 103 # Load checks to execute 104 checks_to_execute = load_checks_to_execute( 105 bulk_checks_metadata, 106 bulk_compliance_frameworks, 107 checks_file, 108 checks, 109 services, 110 severities, 111 compliance_framework, 112 categories, 113 provider, 114 ) 115 116 # If -l/--list-checks passed as argument, print checks to execute and quit 117 if args.list_checks: 118 print_checks(provider, sorted(checks_to_execute), bulk_checks_metadata) 119 sys.exit() 120 121 # Set the audit info based on the selected provider 122 audit_info = set_provider_audit_info(provider, args.__dict__) 123 124 # Import custom checks from folder 125 if checks_folder: 126 parse_checks_from_folder(audit_info, checks_folder, provider) 127 128 # Exclude checks if -e/--excluded-checks 129 if excluded_checks: 130 checks_to_execute = exclude_checks_to_run(checks_to_execute, excluded_checks) 131 132 # Exclude services if --excluded-services 133 if excluded_services: 134 checks_to_execute = exclude_services_to_run( 135 checks_to_execute, excluded_services, provider 136 ) 137 138 # Once the audit_info is set and we have the eventual checks based on the resource identifier, 139 # it is time to check what Prowler's checks are going to be executed 140 if audit_info.audit_resources: 141 checks_to_execute = set_provider_execution_parameters(provider, audit_info) 142 143 # Sort final check list 144 checks_to_execute = sorted(checks_to_execute) 145 146 # Parse Allowlist 147 allowlist_file = set_provider_allowlist(provider, audit_info, args) 148 149 # Set output options based on the selected provider 150 audit_output_options = set_provider_output_options( 151 provider, args, audit_info, allowlist_file, bulk_checks_metadata 152 ) 153 154 # Run the quick inventory for the provider if available 155 if hasattr(args, "quick_inventory") and args.quick_inventory: 156 run_provider_quick_inventory(provider, audit_info, args) 157 sys.exit() 158 159 # Execute checks 160 findings = [] 161 if len(checks_to_execute): 162 findings = execute_checks( 163 checks_to_execute, provider, audit_info, audit_output_options 164 ) 165 else: 166 logger.error( 167 "There are no checks to execute. 
Please, check your input arguments" 168 ) 169 170 # Extract findings stats 171 stats = extract_findings_statistics(findings) 172 173 if args.slack: 174 if "SLACK_API_TOKEN" in os.environ and "SLACK_CHANNEL_ID" in os.environ: 175 _ = send_slack_message( 176 os.environ["SLACK_API_TOKEN"], 177 os.environ["SLACK_CHANNEL_ID"], 178 stats, 179 provider, 180 audit_info, 181 ) 182 else: 183 logger.critical( 184 "Slack integration needs SLACK_API_TOKEN and SLACK_CHANNEL_ID environment variables (see more in https://docs.prowler.cloud/en/latest/tutorials/integrations/#slack)." 185 ) 186 sys.exit(1) 187 188 if args.output_modes: 189 for mode in args.output_modes: 190 # Close json file if exists 191 if "json" in mode: 192 close_json( 193 audit_output_options.output_filename, args.output_directory, mode 194 ) 195 if mode == "html": 196 add_html_footer( 197 audit_output_options.output_filename, args.output_directory 198 ) 199 fill_html_overview_statistics( 200 stats, audit_output_options.output_filename, args.output_directory 201 ) 202 # Send output to S3 if needed (-B / -D) 203 if provider == "aws" and ( 204 args.output_bucket or args.output_bucket_no_assume 205 ): 206 output_bucket = args.output_bucket 207 bucket_session = audit_info.audit_session 208 # Check if -D was input 209 if args.output_bucket_no_assume: 210 output_bucket = args.output_bucket_no_assume 211 bucket_session = audit_info.original_session 212 send_to_s3_bucket( 213 audit_output_options.output_filename, 214 args.output_directory, 215 mode, 216 output_bucket, 217 bucket_session, 218 ) 219 220 # Resolve previous fails of Security Hub 221 if provider == "aws" and args.security_hub and not args.skip_sh_update: 222 resolve_security_hub_previous_findings( 223 args.output_directory, args.output_filename, audit_info 224 ) 225 226 # Display summary table 227 if not args.only_logs: 228 display_summary_table( 229 findings, 230 audit_info, 231 audit_output_options, 232 provider, 233 ) 234 235 if compliance_framework and findings: 236 for compliance in compliance_framework: 237 # Display compliance table 238 display_compliance_table( 239 findings, 240 bulk_checks_metadata, 241 compliance, 242 audit_output_options.output_filename, 243 audit_output_options.output_directory, 244 ) 245 246 # If custom checks were passed, remove the modules 247 if checks_folder: 248 remove_custom_checks_module(checks_folder, provider) 249 250 # If there are failed findings exit code 3, except if -z is input 251 if not args.ignore_exit_code_3 and stats["total_fail"] > 0: 252 sys.exit(3) 253 254 255 if __name__ == "__main__": 256 prowler() 257 [end of prowler/__main__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/prowler/__main__.py b/prowler/__main__.py --- a/prowler/__main__.py +++ b/prowler/__main__.py @@ -220,7 +220,9 @@ # Resolve previous fails of Security Hub if provider == "aws" and args.security_hub and not args.skip_sh_update: resolve_security_hub_previous_findings( - args.output_directory, args.output_filename, audit_info + audit_output_options.output_directory, + audit_output_options.output_filename, + audit_info, ) # Display summary table
{"golden_diff": "diff --git a/prowler/__main__.py b/prowler/__main__.py\n--- a/prowler/__main__.py\n+++ b/prowler/__main__.py\n@@ -220,7 +220,9 @@\n # Resolve previous fails of Security Hub\n if provider == \"aws\" and args.security_hub and not args.skip_sh_update:\n resolve_security_hub_previous_findings(\n- args.output_directory, args.output_filename, audit_info\n+ audit_output_options.output_directory,\n+ audit_output_options.output_filename,\n+ audit_info,\n )\n \n # Display summary table\n", "issue": "FileNotFoundError after version 3.8.1\n### Discussed in https://github.com/prowler-cloud/prowler/discussions/2707\r\n\r\n<div type='discussions-op-text'>\r\n\r\n<sup>Originally posted by **cerontrustly** August 10, 2023</sup>\r\nHello guys!\r\n\r\nAfter using version 3.8.1 Prowler stopped working for me showing the following error:\r\n\r\n`Traceback (most recent call last):\r\n File \"/home/prowler/.local/bin/prowler\", line 8, in <module>\r\n sys.exit(prowler())\r\n File \"/home/prowler/.local/lib/python3.9/site-packages/prowler/__main__.py\", line 222, in prowler\r\n resolve_security_hub_previous_findings(\r\n File \"/home/prowler/.local/lib/python3.9/site-packages/prowler/providers/aws/lib/security_hub/security_hub.py\", line 66, in resolve_security_hub_previous_findings\r\n with open(f\"{output_directory}/{output_filename}{json_asff_file_suffix}\") as f:\r\nFileNotFoundError: [Errno 2] No such file or directory: '/home/prowler/output/None.asff.json'`\r\n\r\nMy command line looks like this:\r\n\r\n`docker run -v /tmp:/prowler/tmp toniblyx/prowler:stable -f us-west-2 -M json-asff -S -z`\r\n\r\nI think it can be related to [this](https://github.com/prowler-cloud/prowler/pull/2687) change.\r\n\r\nCan somebody you help me?\r\n\r\nThank you!</div>\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n\nfrom prowler.lib.banner import print_banner\nfrom prowler.lib.check.check import (\n bulk_load_checks_metadata,\n bulk_load_compliance_frameworks,\n exclude_checks_to_run,\n exclude_services_to_run,\n execute_checks,\n list_categories,\n list_services,\n parse_checks_from_folder,\n print_categories,\n print_checks,\n print_compliance_frameworks,\n print_compliance_requirements,\n print_services,\n remove_custom_checks_module,\n)\nfrom prowler.lib.check.checks_loader import load_checks_to_execute\nfrom prowler.lib.check.compliance import update_checks_metadata_with_compliance\nfrom prowler.lib.cli.parser import ProwlerArgumentParser\nfrom prowler.lib.logger import logger, set_logging_config\nfrom prowler.lib.outputs.compliance import display_compliance_table\nfrom prowler.lib.outputs.html import add_html_footer, fill_html_overview_statistics\nfrom prowler.lib.outputs.json import close_json\nfrom prowler.lib.outputs.outputs import extract_findings_statistics, send_to_s3_bucket\nfrom prowler.lib.outputs.slack import send_slack_message\nfrom prowler.lib.outputs.summary_table import display_summary_table\nfrom prowler.providers.aws.lib.security_hub.security_hub import (\n resolve_security_hub_previous_findings,\n)\nfrom prowler.providers.common.allowlist import set_provider_allowlist\nfrom prowler.providers.common.audit_info import (\n set_provider_audit_info,\n set_provider_execution_parameters,\n)\nfrom prowler.providers.common.outputs import set_provider_output_options\nfrom prowler.providers.common.quick_inventory import run_provider_quick_inventory\n\n\ndef prowler():\n # Parse Arguments\n parser = ProwlerArgumentParser()\n args = 
parser.parse()\n\n # Save Arguments\n provider = args.provider\n checks = args.checks\n excluded_checks = args.excluded_checks\n excluded_services = args.excluded_services\n services = args.services\n categories = args.categories\n checks_file = args.checks_file\n checks_folder = args.checks_folder\n severities = args.severity\n compliance_framework = args.compliance\n\n if not args.no_banner:\n print_banner(args)\n\n # We treat the compliance framework as another output format\n if compliance_framework:\n args.output_modes.extend(compliance_framework)\n\n # Set Logger configuration\n set_logging_config(args.log_level, args.log_file, args.only_logs)\n\n if args.list_services:\n print_services(list_services(provider))\n sys.exit()\n\n # Load checks metadata\n logger.debug(\"Loading checks metadata from .metadata.json files\")\n bulk_checks_metadata = bulk_load_checks_metadata(provider)\n\n if args.list_categories:\n print_categories(list_categories(bulk_checks_metadata))\n sys.exit()\n\n bulk_compliance_frameworks = {}\n # Load compliance frameworks\n logger.debug(\"Loading compliance frameworks from .json files\")\n\n bulk_compliance_frameworks = bulk_load_compliance_frameworks(provider)\n # Complete checks metadata with the compliance framework specification\n update_checks_metadata_with_compliance(\n bulk_compliance_frameworks, bulk_checks_metadata\n )\n if args.list_compliance:\n print_compliance_frameworks(bulk_compliance_frameworks)\n sys.exit()\n if args.list_compliance_requirements:\n print_compliance_requirements(\n bulk_compliance_frameworks, args.list_compliance_requirements\n )\n sys.exit()\n\n # Load checks to execute\n checks_to_execute = load_checks_to_execute(\n bulk_checks_metadata,\n bulk_compliance_frameworks,\n checks_file,\n checks,\n services,\n severities,\n compliance_framework,\n categories,\n provider,\n )\n\n # If -l/--list-checks passed as argument, print checks to execute and quit\n if args.list_checks:\n print_checks(provider, sorted(checks_to_execute), bulk_checks_metadata)\n sys.exit()\n\n # Set the audit info based on the selected provider\n audit_info = set_provider_audit_info(provider, args.__dict__)\n\n # Import custom checks from folder\n if checks_folder:\n parse_checks_from_folder(audit_info, checks_folder, provider)\n\n # Exclude checks if -e/--excluded-checks\n if excluded_checks:\n checks_to_execute = exclude_checks_to_run(checks_to_execute, excluded_checks)\n\n # Exclude services if --excluded-services\n if excluded_services:\n checks_to_execute = exclude_services_to_run(\n checks_to_execute, excluded_services, provider\n )\n\n # Once the audit_info is set and we have the eventual checks based on the resource identifier,\n # it is time to check what Prowler's checks are going to be executed\n if audit_info.audit_resources:\n checks_to_execute = set_provider_execution_parameters(provider, audit_info)\n\n # Sort final check list\n checks_to_execute = sorted(checks_to_execute)\n\n # Parse Allowlist\n allowlist_file = set_provider_allowlist(provider, audit_info, args)\n\n # Set output options based on the selected provider\n audit_output_options = set_provider_output_options(\n provider, args, audit_info, allowlist_file, bulk_checks_metadata\n )\n\n # Run the quick inventory for the provider if available\n if hasattr(args, \"quick_inventory\") and args.quick_inventory:\n run_provider_quick_inventory(provider, audit_info, args)\n sys.exit()\n\n # Execute checks\n findings = []\n if len(checks_to_execute):\n findings = execute_checks(\n 
checks_to_execute, provider, audit_info, audit_output_options\n )\n else:\n logger.error(\n \"There are no checks to execute. Please, check your input arguments\"\n )\n\n # Extract findings stats\n stats = extract_findings_statistics(findings)\n\n if args.slack:\n if \"SLACK_API_TOKEN\" in os.environ and \"SLACK_CHANNEL_ID\" in os.environ:\n _ = send_slack_message(\n os.environ[\"SLACK_API_TOKEN\"],\n os.environ[\"SLACK_CHANNEL_ID\"],\n stats,\n provider,\n audit_info,\n )\n else:\n logger.critical(\n \"Slack integration needs SLACK_API_TOKEN and SLACK_CHANNEL_ID environment variables (see more in https://docs.prowler.cloud/en/latest/tutorials/integrations/#slack).\"\n )\n sys.exit(1)\n\n if args.output_modes:\n for mode in args.output_modes:\n # Close json file if exists\n if \"json\" in mode:\n close_json(\n audit_output_options.output_filename, args.output_directory, mode\n )\n if mode == \"html\":\n add_html_footer(\n audit_output_options.output_filename, args.output_directory\n )\n fill_html_overview_statistics(\n stats, audit_output_options.output_filename, args.output_directory\n )\n # Send output to S3 if needed (-B / -D)\n if provider == \"aws\" and (\n args.output_bucket or args.output_bucket_no_assume\n ):\n output_bucket = args.output_bucket\n bucket_session = audit_info.audit_session\n # Check if -D was input\n if args.output_bucket_no_assume:\n output_bucket = args.output_bucket_no_assume\n bucket_session = audit_info.original_session\n send_to_s3_bucket(\n audit_output_options.output_filename,\n args.output_directory,\n mode,\n output_bucket,\n bucket_session,\n )\n\n # Resolve previous fails of Security Hub\n if provider == \"aws\" and args.security_hub and not args.skip_sh_update:\n resolve_security_hub_previous_findings(\n args.output_directory, args.output_filename, audit_info\n )\n\n # Display summary table\n if not args.only_logs:\n display_summary_table(\n findings,\n audit_info,\n audit_output_options,\n provider,\n )\n\n if compliance_framework and findings:\n for compliance in compliance_framework:\n # Display compliance table\n display_compliance_table(\n findings,\n bulk_checks_metadata,\n compliance,\n audit_output_options.output_filename,\n audit_output_options.output_directory,\n )\n\n # If custom checks were passed, remove the modules\n if checks_folder:\n remove_custom_checks_module(checks_folder, provider)\n\n # If there are failed findings exit code 3, except if -z is input\n if not args.ignore_exit_code_3 and stats[\"total_fail\"] > 0:\n sys.exit(3)\n\n\nif __name__ == \"__main__\":\n prowler()\n", "path": "prowler/__main__.py"}]}
3,333
132
gh_patches_debug_12132
rasdani/github-patches
git_diff
angr__angr-1862
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Strange successors of the return block of a function I'm analysing a MIPS binary when facing the problem. The problem exists in the funcition `do_ssc`. In the following block which has a return statement ![image](https://user-images.githubusercontent.com/8875073/69912053-4fb60900-145f-11ea-9152-4026aab7f033.png) When I run `node.successors` I got ``` In [103]: end.successors Out[103]: [<CFGNode 0x40a7a8[28]>, <CFGNode do_ssc+0x12c [28]>, <CFGNode do_ssc+0x4c4 [28]>, <CFGNode do_ssc+0x45c [24]>, <CFGNode do_ssc+0x2a8 [24]>] ``` Their addresses are `0x40a7a8`, `0x40a33c`, `0x40a6d4` and `0x40a4b8` respectively. I know the cfg of angr is interfunctional, however, only `0x40a7a8` is an caller of `do_ssc`. May I know why other threes exist? </issue> <code> [start of angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py] 1 2 import logging 3 4 import pyvex 5 import archinfo 6 7 8 from .... import options, BP_BEFORE 9 from ....blade import Blade 10 from ....annocfg import AnnotatedCFG 11 from ....exploration_techniques import Slicecutor 12 13 from .resolver import IndirectJumpResolver 14 15 16 l = logging.getLogger(name=__name__) 17 18 19 class MipsElfFastResolver(IndirectJumpResolver): 20 def __init__(self, project): 21 super(MipsElfFastResolver, self).__init__(project, timeless=True) 22 23 def filter(self, cfg, addr, func_addr, block, jumpkind): 24 if not isinstance(self.project.arch, (archinfo.ArchMIPS32, archinfo.ArchMIPS64, )): 25 return False 26 return True 27 28 def resolve(self, cfg, addr, func_addr, block, jumpkind): 29 """ 30 Resolves the indirect jump in MIPS ELF binaries where all external function calls are indexed using gp. 31 32 :param cfg: A CFG instance. 33 :param int addr: IRSB address. 34 :param int func_addr: The function address. 35 :param pyvex.IRSB block: The IRSB. 36 :param str jumpkind: The jumpkind. 37 :return: If it was resolved and targets alongside it 38 :rtype: tuple 39 """ 40 41 project = self.project 42 43 b = Blade(cfg.graph, addr, -1, cfg=cfg, project=project, ignore_sp=True, ignore_bp=True, 44 ignored_regs=('gp',) 45 ) 46 47 sources = [n for n in b.slice.nodes() if b.slice.in_degree(n) == 0] 48 if not sources: 49 return False, [] 50 51 source = sources[0] 52 source_addr = source[0] 53 annotated_cfg = AnnotatedCFG(project, None, detect_loops=False) 54 annotated_cfg.from_digraph(b.slice) 55 56 state = project.factory.blank_state(addr=source_addr, mode="fastpath", 57 remove_options=options.refs 58 ) 59 func = cfg.kb.functions.function(addr=func_addr) 60 61 gp_offset = project.arch.registers['gp'][0] 62 if 'gp' not in func.info: 63 sec = project.loader.find_section_containing(func.addr) 64 if sec is None or sec.name != '.plt': 65 # this might a special case: gp is only used once in this function, and it can be initialized right before 66 # its use site. 
67 # TODO: handle this case 68 l.debug('Failed to determine value of register gp for function %#x.', func.addr) 69 return False, [ ] 70 else: 71 state.regs.gp = func.info['gp'] 72 73 def overwrite_tmp_value(state): 74 state.inspect.tmp_write_expr = state.solver.BVV(func.info['gp'], state.arch.bits) 75 76 # Special handling for cases where `gp` is stored on the stack 77 got_gp_stack_store = False 78 for block_addr_in_slice in set(slice_node[0] for slice_node in b.slice.nodes()): 79 for stmt in project.factory.block(block_addr_in_slice).vex.statements: 80 if isinstance(stmt, pyvex.IRStmt.Put) and stmt.offset == gp_offset and \ 81 isinstance(stmt.data, pyvex.IRExpr.RdTmp): 82 tmp_offset = stmt.data.tmp # pylint:disable=cell-var-from-loop 83 # we must make sure value of that temporary variable equals to the correct gp value 84 state.inspect.make_breakpoint('tmp_write', when=BP_BEFORE, 85 condition=lambda s, bbl_addr_=block_addr_in_slice, 86 tmp_offset_=tmp_offset: 87 s.scratch.bbl_addr == bbl_addr_ and s.inspect.tmp_write_num == tmp_offset_, 88 action=overwrite_tmp_value 89 ) 90 got_gp_stack_store = True 91 break 92 if got_gp_stack_store: 93 break 94 95 simgr = self.project.factory.simulation_manager(state) 96 simgr.use_technique(Slicecutor(annotated_cfg)) 97 simgr.run() 98 99 if simgr.cut: 100 target = simgr.cut[0].addr 101 102 if self._is_target_valid(cfg, target): 103 l.debug("Indirect jump at %#x is resolved to target %#x.", addr, target) 104 return True, [ target ] 105 106 l.debug("Indirect jump at %#x is resolved to target %#x, which seems to be invalid.", addr, target) 107 return False, [ ] 108 109 l.debug("Indirect jump at %#x cannot be resolved by %s.", addr, repr(self)) 110 return False, [ ] 111 [end of angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py b/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py --- a/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py +++ b/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py @@ -97,7 +97,13 @@ simgr.run() if simgr.cut: - target = simgr.cut[0].addr + # pick the successor that is cut right after executing `addr` + try: + target_state = next(iter(cut for cut in simgr.cut if cut.history.addr == addr)) + except StopIteration: + l.debug("Indirect jump at %#x cannot be resolved by %s.", addr, repr(self)) + return False, [ ] + target = target_state.addr if self._is_target_valid(cfg, target): l.debug("Indirect jump at %#x is resolved to target %#x.", addr, target)
{"golden_diff": "diff --git a/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py b/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py\n--- a/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py\n+++ b/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py\n@@ -97,7 +97,13 @@\n simgr.run()\n \n if simgr.cut:\n- target = simgr.cut[0].addr\n+ # pick the successor that is cut right after executing `addr`\n+ try:\n+ target_state = next(iter(cut for cut in simgr.cut if cut.history.addr == addr))\n+ except StopIteration:\n+ l.debug(\"Indirect jump at %#x cannot be resolved by %s.\", addr, repr(self))\n+ return False, [ ]\n+ target = target_state.addr\n \n if self._is_target_valid(cfg, target):\n l.debug(\"Indirect jump at %#x is resolved to target %#x.\", addr, target)\n", "issue": "Strange successors of the return block of a function\nI'm analysing a MIPS binary when facing the problem.\r\n\r\nThe problem exists in the funcition `do_ssc`.\r\n\r\nIn the following block which has a return statement\r\n![image](https://user-images.githubusercontent.com/8875073/69912053-4fb60900-145f-11ea-9152-4026aab7f033.png)\r\n\r\nWhen I run `node.successors` I got\r\n```\r\nIn [103]: end.successors \r\nOut[103]: \r\n[<CFGNode 0x40a7a8[28]>,\r\n <CFGNode do_ssc+0x12c [28]>,\r\n <CFGNode do_ssc+0x4c4 [28]>,\r\n <CFGNode do_ssc+0x45c [24]>,\r\n <CFGNode do_ssc+0x2a8 [24]>]\r\n```\r\nTheir addresses are `0x40a7a8`, `0x40a33c`, `0x40a6d4` and `0x40a4b8` respectively.\r\n\r\nI know the cfg of angr is interfunctional, however, only `0x40a7a8` is an caller of `do_ssc`.\r\n\r\nMay I know why other threes exist?\r\n\r\n\r\n\n", "before_files": [{"content": "\nimport logging\n\nimport pyvex\nimport archinfo\n\n\nfrom .... import options, BP_BEFORE\nfrom ....blade import Blade\nfrom ....annocfg import AnnotatedCFG\nfrom ....exploration_techniques import Slicecutor\n\nfrom .resolver import IndirectJumpResolver\n\n\nl = logging.getLogger(name=__name__)\n\n\nclass MipsElfFastResolver(IndirectJumpResolver):\n def __init__(self, project):\n super(MipsElfFastResolver, self).__init__(project, timeless=True)\n\n def filter(self, cfg, addr, func_addr, block, jumpkind):\n if not isinstance(self.project.arch, (archinfo.ArchMIPS32, archinfo.ArchMIPS64, )):\n return False\n return True\n\n def resolve(self, cfg, addr, func_addr, block, jumpkind):\n \"\"\"\n Resolves the indirect jump in MIPS ELF binaries where all external function calls are indexed using gp.\n\n :param cfg: A CFG instance.\n :param int addr: IRSB address.\n :param int func_addr: The function address.\n :param pyvex.IRSB block: The IRSB.\n :param str jumpkind: The jumpkind.\n :return: If it was resolved and targets alongside it\n :rtype: tuple\n \"\"\"\n\n project = self.project\n\n b = Blade(cfg.graph, addr, -1, cfg=cfg, project=project, ignore_sp=True, ignore_bp=True,\n ignored_regs=('gp',)\n )\n\n sources = [n for n in b.slice.nodes() if b.slice.in_degree(n) == 0]\n if not sources:\n return False, []\n\n source = sources[0]\n source_addr = source[0]\n annotated_cfg = AnnotatedCFG(project, None, detect_loops=False)\n annotated_cfg.from_digraph(b.slice)\n\n state = project.factory.blank_state(addr=source_addr, mode=\"fastpath\",\n remove_options=options.refs\n )\n func = cfg.kb.functions.function(addr=func_addr)\n\n gp_offset = project.arch.registers['gp'][0]\n if 'gp' not in func.info:\n sec = project.loader.find_section_containing(func.addr)\n if sec is None or sec.name != '.plt':\n # this might a special case: gp is only used once in this 
function, and it can be initialized right before\n # its use site.\n # TODO: handle this case\n l.debug('Failed to determine value of register gp for function %#x.', func.addr)\n return False, [ ]\n else:\n state.regs.gp = func.info['gp']\n\n def overwrite_tmp_value(state):\n state.inspect.tmp_write_expr = state.solver.BVV(func.info['gp'], state.arch.bits)\n\n # Special handling for cases where `gp` is stored on the stack\n got_gp_stack_store = False\n for block_addr_in_slice in set(slice_node[0] for slice_node in b.slice.nodes()):\n for stmt in project.factory.block(block_addr_in_slice).vex.statements:\n if isinstance(stmt, pyvex.IRStmt.Put) and stmt.offset == gp_offset and \\\n isinstance(stmt.data, pyvex.IRExpr.RdTmp):\n tmp_offset = stmt.data.tmp # pylint:disable=cell-var-from-loop\n # we must make sure value of that temporary variable equals to the correct gp value\n state.inspect.make_breakpoint('tmp_write', when=BP_BEFORE,\n condition=lambda s, bbl_addr_=block_addr_in_slice,\n tmp_offset_=tmp_offset:\n s.scratch.bbl_addr == bbl_addr_ and s.inspect.tmp_write_num == tmp_offset_,\n action=overwrite_tmp_value\n )\n got_gp_stack_store = True\n break\n if got_gp_stack_store:\n break\n\n simgr = self.project.factory.simulation_manager(state)\n simgr.use_technique(Slicecutor(annotated_cfg))\n simgr.run()\n\n if simgr.cut:\n target = simgr.cut[0].addr\n\n if self._is_target_valid(cfg, target):\n l.debug(\"Indirect jump at %#x is resolved to target %#x.\", addr, target)\n return True, [ target ]\n\n l.debug(\"Indirect jump at %#x is resolved to target %#x, which seems to be invalid.\", addr, target)\n return False, [ ]\n\n l.debug(\"Indirect jump at %#x cannot be resolved by %s.\", addr, repr(self))\n return False, [ ]\n", "path": "angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py"}]}
2,084
246
gh_patches_debug_1220
rasdani/github-patches
git_diff
DataBiosphere__toil-239
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Jenkins should only deploy to PyPI when building off the master branch </issue> <code> [start of setup.py] 1 from setuptools import setup, find_packages 2 3 setup( 4 name='toil', 5 version='3.0.4', 6 description='Pipeline management software for clusters.', 7 author='Benedict Paten', 8 author_email='benedict@soe.usc.edu', 9 url="https://github.com/BD2KGenomics/toil", 10 install_requires=['bd2k-python-lib>=1.7.dev1'], 11 extras_require={ 12 'mesos': [ 13 'mesos.interface==0.22.0', 14 'psutil==3.0.1' ], 15 'aws': [ 16 'boto==2.38.0' ] }, 17 package_dir={ '': 'src' }, 18 packages=find_packages( 'src', exclude=[ '*.test' ] ), 19 entry_points={ 20 'console_scripts': [ 21 'toilKill = toil.utils.toilKill:main', 22 'toilStatus = toil.utils.toilStatus:main', 23 'toilStats = toil.utils.toilStats:main', 24 'toilRestarts = toil.utils.toilRestarts:main', 25 'multijob = toil.batchSystems.multijob:main', 26 'toil-mesos-executor = toil.batchSystems.mesos.executor:main [mesos]'] } ) 27 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ setup( name='toil', - version='3.0.4', + version='3.0.5.dev1', description='Pipeline management software for clusters.', author='Benedict Paten', author_email='benedict@soe.usc.edu',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -2,7 +2,7 @@\n \n setup(\n name='toil',\n- version='3.0.4',\n+ version='3.0.5.dev1',\n description='Pipeline management software for clusters.',\n author='Benedict Paten',\n author_email='benedict@soe.usc.edu',\n", "issue": "Jenkins should only deploy to PyPI when building off the master branch\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nsetup(\n name='toil',\n version='3.0.4',\n description='Pipeline management software for clusters.',\n author='Benedict Paten',\n author_email='benedict@soe.usc.edu',\n url=\"https://github.com/BD2KGenomics/toil\",\n install_requires=['bd2k-python-lib>=1.7.dev1'],\n extras_require={\n 'mesos': [\n 'mesos.interface==0.22.0',\n 'psutil==3.0.1' ],\n 'aws': [\n 'boto==2.38.0' ] },\n package_dir={ '': 'src' },\n packages=find_packages( 'src', exclude=[ '*.test' ] ),\n entry_points={\n 'console_scripts': [\n 'toilKill = toil.utils.toilKill:main',\n 'toilStatus = toil.utils.toilStatus:main',\n 'toilStats = toil.utils.toilStats:main',\n 'toilRestarts = toil.utils.toilRestarts:main',\n 'multijob = toil.batchSystems.multijob:main',\n 'toil-mesos-executor = toil.batchSystems.mesos.executor:main [mesos]'] } )\n", "path": "setup.py"}]}
873
93
gh_patches_debug_59245
rasdani/github-patches
git_diff
facebookresearch__hydra-287
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Bug] example of override fail in multirun This fails `python examples/tutorial/5_composition/my_app.py -m db=mysql,postgresql db.user=omry` </issue> <code> [start of setup.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 import codecs 3 import distutils 4 import os 5 import re 6 import shutil 7 from os.path import join, exists, isdir 8 9 from setuptools import setup, find_packages 10 11 here = os.path.abspath(os.path.dirname(__file__)) 12 13 14 def read(*parts): 15 with codecs.open(os.path.join(here, *parts), "r") as fp: 16 return fp.read() 17 18 19 def find_version(*file_paths): 20 version_file = read(*file_paths) 21 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M) 22 if version_match: 23 return version_match.group(1) 24 raise RuntimeError("Unable to find version string.") 25 26 27 class CleanCommand(distutils.cmd.Command): 28 """ 29 Our custom command to clean out junk files. 30 """ 31 32 description = "Cleans out junk files we don't want in the repo" 33 user_options = [] 34 35 def initialize_options(self): 36 pass 37 38 def finalize_options(self): 39 pass 40 41 @staticmethod 42 def find(root, includes, excludes=[]): 43 res = [] 44 for parent, dirs, files in os.walk(root): 45 for f in dirs + files: 46 add = list() 47 for include in includes: 48 if re.findall(include, f): 49 add.append(join(parent, f)) 50 res.extend(add) 51 final_list = [] 52 # Exclude things that matches an exclude pattern 53 for ex in excludes: 54 for file in res: 55 if not re.findall(ex, file): 56 final_list.append(file) 57 return final_list 58 59 def run(self): 60 delete_patterns = [ 61 ".eggs", 62 ".egg-info", 63 ".pytest_cache", 64 "build", 65 "dist", 66 "__pycache__", 67 ".pyc", 68 ] 69 deletion_list = CleanCommand.find( 70 ".", includes=delete_patterns, excludes=["\\.nox/.*"] 71 ) 72 73 for f in deletion_list: 74 if exists(f): 75 if isdir(f): 76 shutil.rmtree(f, ignore_errors=True) 77 else: 78 os.unlink(f) 79 80 81 with open("README.md", "r") as fh: 82 LONG_DESC = fh.read() 83 setup( 84 cmdclass={"clean": CleanCommand}, 85 name="hydra-core", 86 version=find_version("hydra", "__init__.py"), 87 author="Omry Yadan", 88 author_email="omry@fb.com", 89 description="Hydra is a library for writing flexible command line applications", 90 long_description=LONG_DESC, 91 long_description_content_type="text/markdown", 92 url="https://github.com/facebookresearch/hydra", 93 keywords="command-line configuration yaml tab-completion", 94 packages=find_packages(), 95 include_package_data=True, 96 classifiers=[ 97 "License :: OSI Approved :: MIT License", 98 "Development Status :: 4 - Beta", 99 "Programming Language :: Python :: 2.7", 100 "Programming Language :: Python :: 3.6", 101 "Programming Language :: Python :: 3.7", 102 "Operating System :: POSIX :: Linux", 103 "Operating System :: MacOS", 104 "Operating System :: Microsoft :: Windows", 105 ], 106 install_requires=[ 107 "omegaconf>=1.4.0rc2", 108 'pathlib2>=2.2.0;python_version<"3.0"', 109 ], 110 # Install development dependencies with 111 # pip install -e .[dev] 112 extras_require={ 113 "dev": [ 114 "black", 115 "coverage", 116 "flake8", 117 "flake8-copyright", 118 "nox", 119 "pre-commit", 120 "pytest", 121 "setuptools", 122 "towncrier", 123 "twine", 124 ] 125 }, 126 ) 127 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this 
repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -104,7 +104,7 @@ "Operating System :: Microsoft :: Windows", ], install_requires=[ - "omegaconf>=1.4.0rc2", + "omegaconf>=1.4.0rc3", 'pathlib2>=2.2.0;python_version<"3.0"', ], # Install development dependencies with
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -104,7 +104,7 @@\n \"Operating System :: Microsoft :: Windows\",\n ],\n install_requires=[\n- \"omegaconf>=1.4.0rc2\",\n+ \"omegaconf>=1.4.0rc3\",\n 'pathlib2>=2.2.0;python_version<\"3.0\"',\n ],\n # Install development dependencies with\n", "issue": "[Bug] example of override fail in multirun\nThis fails\r\n\r\n`python examples/tutorial/5_composition/my_app.py -m db=mysql,postgresql db.user=omry`\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport codecs\nimport distutils\nimport os\nimport re\nimport shutil\nfrom os.path import join, exists, isdir\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n with codecs.open(os.path.join(here, *parts), \"r\") as fp:\n return fp.read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nclass CleanCommand(distutils.cmd.Command):\n \"\"\"\n Our custom command to clean out junk files.\n \"\"\"\n\n description = \"Cleans out junk files we don't want in the repo\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n @staticmethod\n def find(root, includes, excludes=[]):\n res = []\n for parent, dirs, files in os.walk(root):\n for f in dirs + files:\n add = list()\n for include in includes:\n if re.findall(include, f):\n add.append(join(parent, f))\n res.extend(add)\n final_list = []\n # Exclude things that matches an exclude pattern\n for ex in excludes:\n for file in res:\n if not re.findall(ex, file):\n final_list.append(file)\n return final_list\n\n def run(self):\n delete_patterns = [\n \".eggs\",\n \".egg-info\",\n \".pytest_cache\",\n \"build\",\n \"dist\",\n \"__pycache__\",\n \".pyc\",\n ]\n deletion_list = CleanCommand.find(\n \".\", includes=delete_patterns, excludes=[\"\\\\.nox/.*\"]\n )\n\n for f in deletion_list:\n if exists(f):\n if isdir(f):\n shutil.rmtree(f, ignore_errors=True)\n else:\n os.unlink(f)\n\n\nwith open(\"README.md\", \"r\") as fh:\n LONG_DESC = fh.read()\n setup(\n cmdclass={\"clean\": CleanCommand},\n name=\"hydra-core\",\n version=find_version(\"hydra\", \"__init__.py\"),\n author=\"Omry Yadan\",\n author_email=\"omry@fb.com\",\n description=\"Hydra is a library for writing flexible command line applications\",\n long_description=LONG_DESC,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/hydra\",\n keywords=\"command-line configuration yaml tab-completion\",\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS\",\n \"Operating System :: Microsoft :: Windows\",\n ],\n install_requires=[\n \"omegaconf>=1.4.0rc2\",\n 'pathlib2>=2.2.0;python_version<\"3.0\"',\n ],\n # Install development dependencies with\n # pip install -e .[dev]\n extras_require={\n \"dev\": [\n \"black\",\n \"coverage\",\n \"flake8\",\n \"flake8-copyright\",\n \"nox\",\n \"pre-commit\",\n \"pytest\",\n 
\"setuptools\",\n \"towncrier\",\n \"twine\",\n ]\n },\n )\n", "path": "setup.py"}]}
1,639
105
gh_patches_debug_21735
rasdani/github-patches
git_diff
akvo__akvo-rsr-4750
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Collaborate draft update bug </issue> <code> [start of akvo/rest/views/indicator_period_data.py] 1 # -*- coding: utf-8 -*- 2 3 # Akvo RSR is covered by the GNU Affero General Public License. 4 # See more details in the license.txt file located at the root folder of the Akvo RSR module. 5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 6 7 import json 8 import os 9 10 from akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment, Project 11 from akvo.rest.models import TastyTokenAuthentication, JWTAuthentication 12 from akvo.rsr.models.result.utils import QUANTITATIVE, PERCENTAGE_MEASURE 13 14 from ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer, 15 IndicatorPeriodDataCommentSerializer) 16 from ..viewsets import PublicProjectViewSet 17 18 from django.shortcuts import get_object_or_404 19 from django.http import HttpResponseBadRequest, HttpResponseForbidden 20 from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION 21 from django.contrib.contenttypes.models import ContentType 22 from rest_framework import status 23 from rest_framework.authentication import SessionAuthentication 24 from rest_framework.decorators import api_view, authentication_classes 25 from rest_framework.response import Response 26 from rest_framework.utils.encoders import JSONEncoder 27 28 29 class IndicatorPeriodDataViewSet(PublicProjectViewSet): 30 """ 31 """ 32 queryset = IndicatorPeriodData.objects.select_related('user', 'approved_by').all() 33 serializer_class = IndicatorPeriodDataSerializer 34 35 project_relation = 'period__indicator__result__project__' 36 37 def filter_queryset(self, queryset): 38 queryset = super(IndicatorPeriodDataViewSet, self).filter_queryset(queryset) 39 return IndicatorPeriodData.get_user_viewable_updates( 40 queryset, self.request.user 41 ) 42 43 def perform_create(self, serializer): 44 serializer.save(user=self.request.user) 45 46 47 class IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet): 48 """ 49 """ 50 authentication_classes = (SessionAuthentication, TastyTokenAuthentication, JWTAuthentication) 51 52 queryset = IndicatorPeriodData.objects.select_related( 53 'period', 54 'user', 55 'approved_by', 56 ).prefetch_related( 57 'comments', 58 'disaggregations', 59 ).all() 60 serializer_class = IndicatorPeriodDataFrameworkSerializer 61 project_relation = 'period__indicator__result__project__' 62 63 def get_queryset(self): 64 queryset = getattr(self, '_c_queryset', None) 65 if queryset is None: 66 queryset = super(IndicatorPeriodDataFrameworkViewSet, self).get_queryset() 67 queryset = IndicatorPeriodData.get_user_viewable_updates( 68 queryset, self.request.user 69 ) 70 self._c_queryset = queryset 71 72 return queryset 73 74 def perform_create(self, serializer): 75 data = {key: value for key, value in serializer.validated_data.items() if key not in ['period', 'files', 'photos', 'approved_by']} 76 if len(serializer._disaggregations_data) > 0: 77 data['disaggregations'] = [ 78 {key: value for key, value in dsg.items() if key in ['id', 'dimension_value', 'value', 'numerator', 'denominator']} 79 for dsg in serializer._disaggregations_data 80 ] 81 user = self.request.user 82 serializer.save(user=user) 83 instance = serializer.instance 84 log_data = {'audit_trail': True, 'data': data} 85 LogEntry.objects.log_action( 86 user_id=user.id, 87 
content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id, 88 object_id=instance.id, 89 object_repr=str(instance), 90 action_flag=ADDITION, 91 change_message=json.dumps(log_data, cls=JSONEncoder) 92 ) 93 94 def perform_update(self, serializer): 95 instance = serializer.instance 96 data = { 97 key: value 98 for key, value in serializer.validated_data.items() 99 if key not in ['period', 'files', 'photos', 'approved_by'] and (key == 'comments' or getattr(instance, key) != value) 100 } 101 if len(serializer._disaggregations_data) > 0: 102 indicator = instance.period.indicator 103 is_percentage = indicator.type == QUANTITATIVE and indicator.measure == PERCENTAGE_MEASURE 104 dsg_attrs = ['id', 'dimension_value', 'numerator', 'denominator'] if is_percentage else ['id', 'dimension_value', 'value'] 105 data['disaggregations'] = [ 106 {key: value for key, value in dsg.items() if key in dsg_attrs} 107 for dsg in serializer._disaggregations_data 108 ] 109 user = self.request.user 110 status = data.get('status', None) 111 if status == 'R' or status == 'A': 112 serializer.save() 113 else: 114 serializer.save(user=user) 115 log_data = {'audit_trail': True, 'data': data} 116 LogEntry.objects.log_action( 117 user_id=user.id, 118 content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id, 119 object_id=instance.id, 120 object_repr=str(instance), 121 action_flag=CHANGE, 122 change_message=json.dumps(log_data, cls=JSONEncoder) 123 ) 124 125 def perform_destroy(self, instance): 126 object_id = instance.id 127 object_repr = str(instance) 128 super().perform_destroy(instance) 129 LogEntry.objects.log_action( 130 user_id=self.request.user.id, 131 content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id, 132 object_id=object_id, 133 object_repr=object_repr, 134 action_flag=DELETION, 135 change_message=json.dumps({'audit_trail': True}) 136 ) 137 138 139 class IndicatorPeriodDataCommentViewSet(PublicProjectViewSet): 140 """ 141 """ 142 authentication_classes = (SessionAuthentication, TastyTokenAuthentication, JWTAuthentication) 143 144 # TODO: Is there more optimization possible? 
145 queryset = IndicatorPeriodDataComment.objects.select_related( 146 'user' 147 ).prefetch_related( 148 'user__employers', 'user__employers__organisation' 149 ) 150 serializer_class = IndicatorPeriodDataCommentSerializer 151 project_relation = 'data__period__indicator__result__project__' 152 153 def perform_create(self, serializer): 154 serializer.save(user=self.request.user) 155 156 157 @api_view(['POST', 'DELETE']) 158 @authentication_classes([SessionAuthentication, TastyTokenAuthentication, JWTAuthentication]) 159 def period_update_files(request, update_pk, file_pk=None): 160 update = get_object_or_404(IndicatorPeriodData, pk=update_pk) 161 user = request.user 162 if not user.has_perm('rsr.change_indicatorperioddata', update): 163 return Response({'error': 'User has no permission to add/remove files'}, status=status.HTTP_403_FORBIDDEN) 164 165 if request.method == 'POST' and not file_pk: 166 serializer = IndicatorPeriodDataFrameworkSerializer(instance=update, data=request.data, partial=True) 167 serializer.is_valid(raise_exception=True) 168 files = [f"Uploaded file \"{file.name}\"" for file in serializer.validated_data.get('files', [])] 169 serializer.save(user=user) 170 log_data = {'audit_trail': True, 'data': {'files': files}} 171 LogEntry.objects.log_action( 172 user_id=user.id, 173 content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id, 174 object_id=update.id, 175 object_repr=str(update), 176 action_flag=CHANGE, 177 change_message=json.dumps(log_data) 178 ) 179 return Response(serializer.data['file_set']) 180 181 if request.method == 'DELETE' and file_pk: 182 file = update.indicatorperioddatafile_set.get(pk=file_pk) 183 filename = os.path.basename(file.file.name) 184 file.delete() 185 update.user = user 186 update.save(update_fields=['user']) 187 log_data = {'audit_trail': True, 'data': {'files': [f"Removed file \"{filename}\""]}} 188 LogEntry.objects.log_action( 189 user_id=user.id, 190 content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id, 191 object_id=update.id, 192 object_repr=str(update), 193 action_flag=CHANGE, 194 change_message=json.dumps(log_data) 195 ) 196 return Response(status=status.HTTP_204_NO_CONTENT) 197 198 return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED) 199 200 201 @api_view(['POST', 'DELETE']) 202 @authentication_classes([SessionAuthentication, TastyTokenAuthentication, JWTAuthentication]) 203 def period_update_photos(request, update_pk, photo_pk=None): 204 update = get_object_or_404(IndicatorPeriodData, pk=update_pk) 205 user = request.user 206 if user != update.user: 207 return Response({'error': 'User has no permission to add/remove photos'}, status=status.HTTP_403_FORBIDDEN) 208 209 if request.method == 'POST' and not photo_pk: 210 serializer = IndicatorPeriodDataFrameworkSerializer(instance=update, data=request.data, partial=True) 211 serializer.is_valid(raise_exception=True) 212 serializer.save(user=user) 213 return Response(serializer.data['photo_set']) 214 215 if request.method == 'DELETE' and photo_pk: 216 photo = update.indicatorperioddataphoto_set.get(pk=photo_pk) 217 photo.delete() 218 return Response(status=status.HTTP_204_NO_CONTENT) 219 220 return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED) 221 222 223 @api_view(['POST', 'DELETE']) 224 def indicator_upload_file(request, pk=None): 225 """ 226 Special API call for directly uploading a file. 227 228 :param request; A Django request object. 229 :param pk; The primary key of an IndicatorPeriodData instance. 
230 """ 231 # Permissions 232 user = getattr(request, 'user', None) 233 if not user: 234 return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN) 235 # TODO: Check if user is allowed to upload a file 236 # if not user.has_perm('rsr.change_project', update.period.indicator.result.project): 237 # return Response({'error': 'User has no permission to place an update'}, 238 # status=status.HTTP_403_FORBIDDEN) 239 240 update = IndicatorPeriodData.objects.get(pk=pk) 241 if request.method == 'DELETE': 242 try: 243 if request.data['type'] == 'photo': 244 update.photo = '' 245 update.save(update_fields=['photo']) 246 return Response({}, status=status.HTTP_204_NO_CONTENT) 247 elif request.data['type'] == 'file': 248 update.file = '' 249 update.save(update_fields=['file']) 250 return Response({}, status=status.HTTP_204_NO_CONTENT) 251 except Exception as e: 252 return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST) 253 else: # POST 254 upload_file = request.data['file'] 255 try: 256 file_type = request.POST.copy()['type'] 257 if file_type == 'photo': 258 update.photo = upload_file 259 update.save(update_fields=['photo']) 260 # Add photo member to be able to distinguish from file URL in new results version 261 # while keeping the old API 262 return Response({'file': update.photo.url, 'photo': update.photo.url}) 263 elif file_type == 'file': 264 update.file = upload_file 265 update.save(update_fields=['file']) 266 return Response({'file': update.file.url}) 267 except Exception as e: 268 return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST) 269 270 271 @api_view(['POST']) 272 @authentication_classes([SessionAuthentication, TastyTokenAuthentication]) 273 def set_updates_status(request, project_pk): 274 """Bulk update IndicatorPeriodData.status attributes of a project. 275 """ 276 update_ids = request.data.get('updates', []) 277 status = request.data.get('status', None) 278 if len(update_ids) < 1 or status is None: 279 return HttpResponseBadRequest() 280 user = request.user 281 project = get_object_or_404(Project, pk=project_pk) 282 if not user.has_perm('rsr.change_project', project): 283 return HttpResponseForbidden() 284 IndicatorPeriodData.objects\ 285 .filter(id__in=update_ids, period__indicator__result__project=project)\ 286 .update(status=status) 287 log_data = {'audit_trail': True, 'data': {'status': status}} 288 for update_id in update_ids: 289 LogEntry.objects.log_action( 290 user_id=user.id, 291 content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id, 292 object_id=update_id, 293 object_repr='IndicatorPeriodData', 294 action_flag=CHANGE, 295 change_message=json.dumps(log_data) 296 ) 297 return Response({'success': True}) 298 [end of akvo/rest/views/indicator_period_data.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/akvo/rest/views/indicator_period_data.py b/akvo/rest/views/indicator_period_data.py
--- a/akvo/rest/views/indicator_period_data.py
+++ b/akvo/rest/views/indicator_period_data.py
@@ -60,15 +60,22 @@
 serializer_class = IndicatorPeriodDataFrameworkSerializer
 project_relation = 'period__indicator__result__project__'
 
- def get_queryset(self):
- queryset = getattr(self, '_c_queryset', None)
- if queryset is None:
- queryset = super(IndicatorPeriodDataFrameworkViewSet, self).get_queryset()
- queryset = IndicatorPeriodData.get_user_viewable_updates(
- queryset, self.request.user
- )
- self._c_queryset = queryset
+ def get_object(self):
+ obj = get_object_or_404(self.get_queryset(), pk=self.kwargs['pk'])
+ # check whether the user has permission
+ viewables = IndicatorPeriodData.get_user_viewable_updates(
+ self.get_queryset().filter(pk=self.kwargs['pk']),
+ self.request.user
+ )
+ if viewables.count() == 0:
+ self.permission_denied(self.request)
+ return obj
 
+ def filter_queryset(self, queryset):
+ queryset = super().filter_queryset(queryset)
+ queryset = IndicatorPeriodData.get_user_viewable_updates(
+ queryset, self.request.user
+ )
 return queryset
 
 def perform_create(self, serializer):
{"golden_diff": "diff --git a/akvo/rest/views/indicator_period_data.py b/akvo/rest/views/indicator_period_data.py\n--- a/akvo/rest/views/indicator_period_data.py\n+++ b/akvo/rest/views/indicator_period_data.py\n@@ -60,15 +60,22 @@\n serializer_class = IndicatorPeriodDataFrameworkSerializer\n project_relation = 'period__indicator__result__project__'\n \n- def get_queryset(self):\n- queryset = getattr(self, '_c_queryset', None)\n- if queryset is None:\n- queryset = super(IndicatorPeriodDataFrameworkViewSet, self).get_queryset()\n- queryset = IndicatorPeriodData.get_user_viewable_updates(\n- queryset, self.request.user\n- )\n- self._c_queryset = queryset\n+ def get_object(self):\n+ obj = get_object_or_404(self.get_queryset(), pk=self.kwargs['pk'])\n+ # check whether the user has permission\n+ viewables = IndicatorPeriodData.get_user_viewable_updates(\n+ self.get_queryset().filter(pk=self.kwargs['pk']),\n+ self.request.user\n+ )\n+ if viewables.count() == 0:\n+ self.permission_denied(self.request)\n+ return obj\n \n+ def filter_queryset(self, queryset):\n+ queryset = super().filter_queryset(queryset)\n+ queryset = IndicatorPeriodData.get_user_viewable_updates(\n+ queryset, self.request.user\n+ )\n return queryset\n \n def perform_create(self, serializer):\n", "issue": "Collaborate draft update bug\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nimport json\nimport os\n\nfrom akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment, Project\nfrom akvo.rest.models import TastyTokenAuthentication, JWTAuthentication\nfrom akvo.rsr.models.result.utils import QUANTITATIVE, PERCENTAGE_MEASURE\n\nfrom ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer,\n IndicatorPeriodDataCommentSerializer)\nfrom ..viewsets import PublicProjectViewSet\n\nfrom django.shortcuts import get_object_or_404\nfrom django.http import HttpResponseBadRequest, HttpResponseForbidden\nfrom django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION\nfrom django.contrib.contenttypes.models import ContentType\nfrom rest_framework import status\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.decorators import api_view, authentication_classes\nfrom rest_framework.response import Response\nfrom rest_framework.utils.encoders import JSONEncoder\n\n\nclass IndicatorPeriodDataViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.select_related('user', 'approved_by').all()\n serializer_class = IndicatorPeriodDataSerializer\n\n project_relation = 'period__indicator__result__project__'\n\n def filter_queryset(self, queryset):\n queryset = super(IndicatorPeriodDataViewSet, self).filter_queryset(queryset)\n return IndicatorPeriodData.get_user_viewable_updates(\n queryset, self.request.user\n )\n\n def perform_create(self, serializer):\n serializer.save(user=self.request.user)\n\n\nclass IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n authentication_classes = (SessionAuthentication, TastyTokenAuthentication, JWTAuthentication)\n\n queryset = IndicatorPeriodData.objects.select_related(\n 'period',\n 'user',\n 'approved_by',\n ).prefetch_related(\n 'comments',\n 'disaggregations',\n ).all()\n 
serializer_class = IndicatorPeriodDataFrameworkSerializer\n project_relation = 'period__indicator__result__project__'\n\n def get_queryset(self):\n queryset = getattr(self, '_c_queryset', None)\n if queryset is None:\n queryset = super(IndicatorPeriodDataFrameworkViewSet, self).get_queryset()\n queryset = IndicatorPeriodData.get_user_viewable_updates(\n queryset, self.request.user\n )\n self._c_queryset = queryset\n\n return queryset\n\n def perform_create(self, serializer):\n data = {key: value for key, value in serializer.validated_data.items() if key not in ['period', 'files', 'photos', 'approved_by']}\n if len(serializer._disaggregations_data) > 0:\n data['disaggregations'] = [\n {key: value for key, value in dsg.items() if key in ['id', 'dimension_value', 'value', 'numerator', 'denominator']}\n for dsg in serializer._disaggregations_data\n ]\n user = self.request.user\n serializer.save(user=user)\n instance = serializer.instance\n log_data = {'audit_trail': True, 'data': data}\n LogEntry.objects.log_action(\n user_id=user.id,\n content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,\n object_id=instance.id,\n object_repr=str(instance),\n action_flag=ADDITION,\n change_message=json.dumps(log_data, cls=JSONEncoder)\n )\n\n def perform_update(self, serializer):\n instance = serializer.instance\n data = {\n key: value\n for key, value in serializer.validated_data.items()\n if key not in ['period', 'files', 'photos', 'approved_by'] and (key == 'comments' or getattr(instance, key) != value)\n }\n if len(serializer._disaggregations_data) > 0:\n indicator = instance.period.indicator\n is_percentage = indicator.type == QUANTITATIVE and indicator.measure == PERCENTAGE_MEASURE\n dsg_attrs = ['id', 'dimension_value', 'numerator', 'denominator'] if is_percentage else ['id', 'dimension_value', 'value']\n data['disaggregations'] = [\n {key: value for key, value in dsg.items() if key in dsg_attrs}\n for dsg in serializer._disaggregations_data\n ]\n user = self.request.user\n status = data.get('status', None)\n if status == 'R' or status == 'A':\n serializer.save()\n else:\n serializer.save(user=user)\n log_data = {'audit_trail': True, 'data': data}\n LogEntry.objects.log_action(\n user_id=user.id,\n content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,\n object_id=instance.id,\n object_repr=str(instance),\n action_flag=CHANGE,\n change_message=json.dumps(log_data, cls=JSONEncoder)\n )\n\n def perform_destroy(self, instance):\n object_id = instance.id\n object_repr = str(instance)\n super().perform_destroy(instance)\n LogEntry.objects.log_action(\n user_id=self.request.user.id,\n content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,\n object_id=object_id,\n object_repr=object_repr,\n action_flag=DELETION,\n change_message=json.dumps({'audit_trail': True})\n )\n\n\nclass IndicatorPeriodDataCommentViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n authentication_classes = (SessionAuthentication, TastyTokenAuthentication, JWTAuthentication)\n\n # TODO: Is there more optimization possible?\n queryset = IndicatorPeriodDataComment.objects.select_related(\n 'user'\n ).prefetch_related(\n 'user__employers', 'user__employers__organisation'\n )\n serializer_class = IndicatorPeriodDataCommentSerializer\n project_relation = 'data__period__indicator__result__project__'\n\n def perform_create(self, serializer):\n serializer.save(user=self.request.user)\n\n\n@api_view(['POST', 'DELETE'])\n@authentication_classes([SessionAuthentication, 
TastyTokenAuthentication, JWTAuthentication])\ndef period_update_files(request, update_pk, file_pk=None):\n update = get_object_or_404(IndicatorPeriodData, pk=update_pk)\n user = request.user\n if not user.has_perm('rsr.change_indicatorperioddata', update):\n return Response({'error': 'User has no permission to add/remove files'}, status=status.HTTP_403_FORBIDDEN)\n\n if request.method == 'POST' and not file_pk:\n serializer = IndicatorPeriodDataFrameworkSerializer(instance=update, data=request.data, partial=True)\n serializer.is_valid(raise_exception=True)\n files = [f\"Uploaded file \\\"{file.name}\\\"\" for file in serializer.validated_data.get('files', [])]\n serializer.save(user=user)\n log_data = {'audit_trail': True, 'data': {'files': files}}\n LogEntry.objects.log_action(\n user_id=user.id,\n content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,\n object_id=update.id,\n object_repr=str(update),\n action_flag=CHANGE,\n change_message=json.dumps(log_data)\n )\n return Response(serializer.data['file_set'])\n\n if request.method == 'DELETE' and file_pk:\n file = update.indicatorperioddatafile_set.get(pk=file_pk)\n filename = os.path.basename(file.file.name)\n file.delete()\n update.user = user\n update.save(update_fields=['user'])\n log_data = {'audit_trail': True, 'data': {'files': [f\"Removed file \\\"{filename}\\\"\"]}}\n LogEntry.objects.log_action(\n user_id=user.id,\n content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,\n object_id=update.id,\n object_repr=str(update),\n action_flag=CHANGE,\n change_message=json.dumps(log_data)\n )\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\n@api_view(['POST', 'DELETE'])\n@authentication_classes([SessionAuthentication, TastyTokenAuthentication, JWTAuthentication])\ndef period_update_photos(request, update_pk, photo_pk=None):\n update = get_object_or_404(IndicatorPeriodData, pk=update_pk)\n user = request.user\n if user != update.user:\n return Response({'error': 'User has no permission to add/remove photos'}, status=status.HTTP_403_FORBIDDEN)\n\n if request.method == 'POST' and not photo_pk:\n serializer = IndicatorPeriodDataFrameworkSerializer(instance=update, data=request.data, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save(user=user)\n return Response(serializer.data['photo_set'])\n\n if request.method == 'DELETE' and photo_pk:\n photo = update.indicatorperioddataphoto_set.get(pk=photo_pk)\n photo.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\n@api_view(['POST', 'DELETE'])\ndef indicator_upload_file(request, pk=None):\n \"\"\"\n Special API call for directly uploading a file.\n\n :param request; A Django request object.\n :param pk; The primary key of an IndicatorPeriodData instance.\n \"\"\"\n # Permissions\n user = getattr(request, 'user', None)\n if not user:\n return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN)\n # TODO: Check if user is allowed to upload a file\n # if not user.has_perm('rsr.change_project', update.period.indicator.result.project):\n # return Response({'error': 'User has no permission to place an update'},\n # status=status.HTTP_403_FORBIDDEN)\n\n update = IndicatorPeriodData.objects.get(pk=pk)\n if request.method == 'DELETE':\n try:\n if request.data['type'] == 'photo':\n update.photo = ''\n update.save(update_fields=['photo'])\n return Response({}, 
status=status.HTTP_204_NO_CONTENT)\n elif request.data['type'] == 'file':\n update.file = ''\n update.save(update_fields=['file'])\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n else: # POST\n upload_file = request.data['file']\n try:\n file_type = request.POST.copy()['type']\n if file_type == 'photo':\n update.photo = upload_file\n update.save(update_fields=['photo'])\n # Add photo member to be able to distinguish from file URL in new results version\n # while keeping the old API\n return Response({'file': update.photo.url, 'photo': update.photo.url})\n elif file_type == 'file':\n update.file = upload_file\n update.save(update_fields=['file'])\n return Response({'file': update.file.url})\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['POST'])\n@authentication_classes([SessionAuthentication, TastyTokenAuthentication])\ndef set_updates_status(request, project_pk):\n \"\"\"Bulk update IndicatorPeriodData.status attributes of a project.\n \"\"\"\n update_ids = request.data.get('updates', [])\n status = request.data.get('status', None)\n if len(update_ids) < 1 or status is None:\n return HttpResponseBadRequest()\n user = request.user\n project = get_object_or_404(Project, pk=project_pk)\n if not user.has_perm('rsr.change_project', project):\n return HttpResponseForbidden()\n IndicatorPeriodData.objects\\\n .filter(id__in=update_ids, period__indicator__result__project=project)\\\n .update(status=status)\n log_data = {'audit_trail': True, 'data': {'status': status}}\n for update_id in update_ids:\n LogEntry.objects.log_action(\n user_id=user.id,\n content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,\n object_id=update_id,\n object_repr='IndicatorPeriodData',\n action_flag=CHANGE,\n change_message=json.dumps(log_data)\n )\n return Response({'success': True})\n", "path": "akvo/rest/views/indicator_period_data.py"}]}
4,008
323
gh_patches_debug_32693
rasdani/github-patches
git_diff
liqd__a4-meinberlin-1972
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> status bar AND project finished both shown on pop up ![bildschirmfoto 2019-02-04 um 14 11 24](https://user-images.githubusercontent.com/35491681/52210297-ebe6a200-2886-11e9-8f4e-4360398140b8.png) </issue> <code> [start of meinberlin/apps/plans/serializers.py] 1 from django.utils.translation import ugettext as _ 2 from easy_thumbnails.files import get_thumbnailer 3 from rest_framework import serializers 4 5 from adhocracy4.projects.models import Project 6 from meinberlin.apps.projects import get_project_type 7 8 from .models import Plan 9 10 11 class CommonFields: 12 13 def get_district(self, instance): 14 city_wide = _('City wide') 15 district_name = str(city_wide) 16 if instance.administrative_district: 17 district_name = instance.administrative_district.name 18 return district_name 19 20 def get_point(self, instance): 21 point = instance.point 22 if not point: 23 point = '' 24 return point 25 26 def get_organisation(self, instance): 27 return instance.organisation.name 28 29 30 class ProjectSerializer(serializers.ModelSerializer, CommonFields): 31 type = serializers.SerializerMethodField() 32 subtype = serializers.SerializerMethodField() 33 title = serializers.SerializerMethodField() 34 url = serializers.SerializerMethodField() 35 point = serializers.SerializerMethodField() 36 point_label = serializers.SerializerMethodField() 37 cost = serializers.SerializerMethodField() 38 district = serializers.SerializerMethodField() 39 status = serializers.SerializerMethodField() 40 organisation = serializers.SerializerMethodField() 41 participation = serializers.SerializerMethodField() 42 participation_display = serializers.SerializerMethodField() 43 participation_active = serializers.SerializerMethodField() 44 participation_string = serializers.SerializerMethodField() 45 future_phase = serializers.SerializerMethodField() 46 active_phase = serializers.SerializerMethodField() 47 past_phase = serializers.SerializerMethodField() 48 tile_image = serializers.SerializerMethodField() 49 plan_url = serializers.SerializerMethodField() 50 plan_title = serializers.SerializerMethodField() 51 published_projects_count = serializers.SerializerMethodField() 52 created_or_modified = serializers.SerializerMethodField() 53 54 class Meta: 55 model = Project 56 fields = ['type', 'subtype', 'title', 'url', 57 'organisation', 'tile_image', 58 'tile_image_copyright', 59 'point', 'point_label', 'cost', 60 'district', 'topics', 61 'status', 62 'participation_string', 63 'participation_active', 64 'participation', 'participation_display', 'description', 65 'future_phase', 'active_phase', 66 'past_phase', 'plan_url', 'plan_title', 67 'published_projects_count', 'created_or_modified'] 68 69 def _get_participation_status_project(self, instance): 70 project_phases = instance.phases 71 72 if project_phases.active_phases(): 73 return _('running'), True 74 75 if project_phases.future_phases(): 76 try: 77 return (_('starts at {}').format 78 (project_phases.future_phases().first(). 
79 start_date.date().strftime('%d.%m.%Y')), 80 True) 81 except AttributeError as e: 82 print(e) 83 return (_('starts in the future'), 84 True) 85 else: 86 return _('done'), False 87 88 def get_type(self, instance): 89 return 'project' 90 91 def get_subtype(self, instance): 92 subtype = get_project_type(instance) 93 if subtype in ('external', 'bplan'): 94 return 'external' 95 return subtype 96 97 def get_title(self, instance): 98 return instance.name 99 100 def get_url(self, instance): 101 if get_project_type(instance) in ('external', 'bplan'): 102 return instance.externalproject.url 103 return instance.get_absolute_url() 104 105 def get_tile_image(self, instance): 106 image_url = '' 107 if instance.tile_image: 108 image = get_thumbnailer(instance.tile_image)['project_tile'] 109 image_url = image.url 110 return image_url 111 112 def get_status(self, instance): 113 project_phases = instance.phases 114 if project_phases.active_phases() or project_phases.future_phases(): 115 return 0 116 return 1 117 118 def get_participation(self, instance): 119 return 0 120 121 def get_participation_display(self, instance): 122 return _('Yes') 123 124 def get_future_phase(self, instance): 125 if (instance.future_phases and 126 instance.future_phases.first().start_date): 127 return str( 128 instance.future_phases.first().start_date.date()) 129 return False 130 131 def get_active_phase(self, instance): 132 project_phases = instance.phases 133 if project_phases.active_phases(): 134 progress = instance.active_phase_progress 135 time_left = instance.time_left 136 end_date = str(project_phases.active_phases().last().end_date) 137 return [progress, time_left, end_date] 138 return False 139 140 def get_past_phase(self, instance): 141 project_phases = instance.phases 142 if (project_phases.past_phases() and 143 project_phases.past_phases().first().end_date): 144 return str( 145 project_phases.past_phases().first().end_date.date()) 146 return False 147 148 def get_participation_string(self, instance): 149 participation_string, participation_active = \ 150 self._get_participation_status_project(instance) 151 return str(participation_string) 152 153 def get_participation_active(self, instance): 154 participation_string, participation_active = \ 155 self._get_participation_status_project(instance) 156 return participation_active 157 158 def get_plan_url(self, instance): 159 if instance.plans.exists(): 160 return instance.plans.first().get_absolute_url() 161 return None 162 163 def get_plan_title(self, instance): 164 if instance.plans.exists(): 165 return instance.plans.first().title 166 return None 167 168 def get_published_projects_count(self, instance): 169 if hasattr(instance, 'projectcontainer') and instance.projectcontainer: 170 return instance.projectcontainer.active_project_count 171 172 def get_point_label(self, instance): 173 return '' 174 175 def get_cost(self, instance): 176 return '' 177 178 def get_created_or_modified(self, instance): 179 if instance.modified: 180 return str(instance.modified) 181 return str(instance.created) 182 183 184 class PlanSerializer(serializers.ModelSerializer, CommonFields): 185 type = serializers.SerializerMethodField() 186 subtype = serializers.SerializerMethodField() 187 url = serializers.SerializerMethodField() 188 district = serializers.SerializerMethodField() 189 point = serializers.SerializerMethodField() 190 participation_active = serializers.SerializerMethodField() 191 participation_string = serializers.SerializerMethodField() 192 published_projects_count = 
serializers.SerializerMethodField() 193 organisation = serializers.SerializerMethodField() 194 created_or_modified = serializers.SerializerMethodField() 195 196 class Meta: 197 model = Plan 198 fields = ['type', 'subtype', 'title', 'url', 199 'organisation', 'point', 200 'point_label', 'cost', 201 'district', 'topics', 'status', 202 'participation', 203 'participation_string', 204 'participation_active', 205 'published_projects_count', 'created_or_modified'] 206 207 def get_subtype(self, instance): 208 return 'plan' 209 210 def _get_participation_status_plan(self, item): 211 projects = item.published_projects 212 if not projects: 213 return item.get_participation_display(), False 214 else: 215 status_string = item.participation_string 216 if status_string: 217 return status_string, True 218 else: 219 return item.get_participation_display(), False 220 221 def get_type(self, instance): 222 return 'plan' 223 224 def get_url(self, instance): 225 return instance.get_absolute_url() 226 227 def get_published_projects_count(self, instance): 228 return instance.published_projects.count() 229 230 def get_participation_string(self, instance): 231 participation_string, participation_active = \ 232 self._get_participation_status_plan(instance) 233 return str(participation_string) 234 235 def get_participation_active(self, instance): 236 participation_string, participation_active = \ 237 self._get_participation_status_plan(instance) 238 return participation_active 239 240 def get_created_or_modified(self, instance): 241 if instance.modified: 242 return str(instance.modified) 243 return str(instance.created) 244 [end of meinberlin/apps/plans/serializers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/meinberlin/apps/plans/serializers.py b/meinberlin/apps/plans/serializers.py
--- a/meinberlin/apps/plans/serializers.py
+++ b/meinberlin/apps/plans/serializers.py
@@ -26,6 +26,11 @@
 def get_organisation(self, instance):
 return instance.organisation.name
 
+ def get_created_or_modified(self, instance):
+ if instance.modified:
+ return str(instance.modified)
+ return str(instance.created)
+
 
 class ProjectSerializer(serializers.ModelSerializer, CommonFields):
 type = serializers.SerializerMethodField()
@@ -130,7 +135,7 @@
 
 def get_active_phase(self, instance):
 project_phases = instance.phases
- if project_phases.active_phases():
+ if project_phases.active_phases() and instance.active_phase_progress:
 progress = instance.active_phase_progress
 time_left = instance.time_left
 end_date = str(project_phases.active_phases().last().end_date)
@@ -175,11 +180,6 @@
 def get_cost(self, instance):
 return ''
 
- def get_created_or_modified(self, instance):
- if instance.modified:
- return str(instance.modified)
- return str(instance.created)
-
 
 class PlanSerializer(serializers.ModelSerializer, CommonFields):
 type = serializers.SerializerMethodField()
@@ -236,8 +236,3 @@
 participation_string, participation_active = \
 self._get_participation_status_plan(instance)
 return participation_active
-
- def get_created_or_modified(self, instance):
- if instance.modified:
- return str(instance.modified)
- return str(instance.created)
{"golden_diff": "diff --git a/meinberlin/apps/plans/serializers.py b/meinberlin/apps/plans/serializers.py\n--- a/meinberlin/apps/plans/serializers.py\n+++ b/meinberlin/apps/plans/serializers.py\n@@ -26,6 +26,11 @@\n def get_organisation(self, instance):\n return instance.organisation.name\n \n+ def get_created_or_modified(self, instance):\n+ if instance.modified:\n+ return str(instance.modified)\n+ return str(instance.created)\n+\n \n class ProjectSerializer(serializers.ModelSerializer, CommonFields):\n type = serializers.SerializerMethodField()\n@@ -130,7 +135,7 @@\n \n def get_active_phase(self, instance):\n project_phases = instance.phases\n- if project_phases.active_phases():\n+ if project_phases.active_phases() and instance.active_phase_progress:\n progress = instance.active_phase_progress\n time_left = instance.time_left\n end_date = str(project_phases.active_phases().last().end_date)\n@@ -175,11 +180,6 @@\n def get_cost(self, instance):\n return ''\n \n- def get_created_or_modified(self, instance):\n- if instance.modified:\n- return str(instance.modified)\n- return str(instance.created)\n-\n \n class PlanSerializer(serializers.ModelSerializer, CommonFields):\n type = serializers.SerializerMethodField()\n@@ -236,8 +236,3 @@\n participation_string, participation_active = \\\n self._get_participation_status_plan(instance)\n return participation_active\n-\n- def get_created_or_modified(self, instance):\n- if instance.modified:\n- return str(instance.modified)\n- return str(instance.created)\n", "issue": "status bar AND project finished both shown on pop up\n![bildschirmfoto 2019-02-04 um 14 11 24](https://user-images.githubusercontent.com/35491681/52210297-ebe6a200-2886-11e9-8f4e-4360398140b8.png)\r\n\n", "before_files": [{"content": "from django.utils.translation import ugettext as _\nfrom easy_thumbnails.files import get_thumbnailer\nfrom rest_framework import serializers\n\nfrom adhocracy4.projects.models import Project\nfrom meinberlin.apps.projects import get_project_type\n\nfrom .models import Plan\n\n\nclass CommonFields:\n\n def get_district(self, instance):\n city_wide = _('City wide')\n district_name = str(city_wide)\n if instance.administrative_district:\n district_name = instance.administrative_district.name\n return district_name\n\n def get_point(self, instance):\n point = instance.point\n if not point:\n point = ''\n return point\n\n def get_organisation(self, instance):\n return instance.organisation.name\n\n\nclass ProjectSerializer(serializers.ModelSerializer, CommonFields):\n type = serializers.SerializerMethodField()\n subtype = serializers.SerializerMethodField()\n title = serializers.SerializerMethodField()\n url = serializers.SerializerMethodField()\n point = serializers.SerializerMethodField()\n point_label = serializers.SerializerMethodField()\n cost = serializers.SerializerMethodField()\n district = serializers.SerializerMethodField()\n status = serializers.SerializerMethodField()\n organisation = serializers.SerializerMethodField()\n participation = serializers.SerializerMethodField()\n participation_display = serializers.SerializerMethodField()\n participation_active = serializers.SerializerMethodField()\n participation_string = serializers.SerializerMethodField()\n future_phase = serializers.SerializerMethodField()\n active_phase = serializers.SerializerMethodField()\n past_phase = serializers.SerializerMethodField()\n tile_image = serializers.SerializerMethodField()\n plan_url = serializers.SerializerMethodField()\n plan_title = 
serializers.SerializerMethodField()\n published_projects_count = serializers.SerializerMethodField()\n created_or_modified = serializers.SerializerMethodField()\n\n class Meta:\n model = Project\n fields = ['type', 'subtype', 'title', 'url',\n 'organisation', 'tile_image',\n 'tile_image_copyright',\n 'point', 'point_label', 'cost',\n 'district', 'topics',\n 'status',\n 'participation_string',\n 'participation_active',\n 'participation', 'participation_display', 'description',\n 'future_phase', 'active_phase',\n 'past_phase', 'plan_url', 'plan_title',\n 'published_projects_count', 'created_or_modified']\n\n def _get_participation_status_project(self, instance):\n project_phases = instance.phases\n\n if project_phases.active_phases():\n return _('running'), True\n\n if project_phases.future_phases():\n try:\n return (_('starts at {}').format\n (project_phases.future_phases().first().\n start_date.date().strftime('%d.%m.%Y')),\n True)\n except AttributeError as e:\n print(e)\n return (_('starts in the future'),\n True)\n else:\n return _('done'), False\n\n def get_type(self, instance):\n return 'project'\n\n def get_subtype(self, instance):\n subtype = get_project_type(instance)\n if subtype in ('external', 'bplan'):\n return 'external'\n return subtype\n\n def get_title(self, instance):\n return instance.name\n\n def get_url(self, instance):\n if get_project_type(instance) in ('external', 'bplan'):\n return instance.externalproject.url\n return instance.get_absolute_url()\n\n def get_tile_image(self, instance):\n image_url = ''\n if instance.tile_image:\n image = get_thumbnailer(instance.tile_image)['project_tile']\n image_url = image.url\n return image_url\n\n def get_status(self, instance):\n project_phases = instance.phases\n if project_phases.active_phases() or project_phases.future_phases():\n return 0\n return 1\n\n def get_participation(self, instance):\n return 0\n\n def get_participation_display(self, instance):\n return _('Yes')\n\n def get_future_phase(self, instance):\n if (instance.future_phases and\n instance.future_phases.first().start_date):\n return str(\n instance.future_phases.first().start_date.date())\n return False\n\n def get_active_phase(self, instance):\n project_phases = instance.phases\n if project_phases.active_phases():\n progress = instance.active_phase_progress\n time_left = instance.time_left\n end_date = str(project_phases.active_phases().last().end_date)\n return [progress, time_left, end_date]\n return False\n\n def get_past_phase(self, instance):\n project_phases = instance.phases\n if (project_phases.past_phases() and\n project_phases.past_phases().first().end_date):\n return str(\n project_phases.past_phases().first().end_date.date())\n return False\n\n def get_participation_string(self, instance):\n participation_string, participation_active = \\\n self._get_participation_status_project(instance)\n return str(participation_string)\n\n def get_participation_active(self, instance):\n participation_string, participation_active = \\\n self._get_participation_status_project(instance)\n return participation_active\n\n def get_plan_url(self, instance):\n if instance.plans.exists():\n return instance.plans.first().get_absolute_url()\n return None\n\n def get_plan_title(self, instance):\n if instance.plans.exists():\n return instance.plans.first().title\n return None\n\n def get_published_projects_count(self, instance):\n if hasattr(instance, 'projectcontainer') and instance.projectcontainer:\n return instance.projectcontainer.active_project_count\n\n def 
get_point_label(self, instance):\n return ''\n\n def get_cost(self, instance):\n return ''\n\n def get_created_or_modified(self, instance):\n if instance.modified:\n return str(instance.modified)\n return str(instance.created)\n\n\nclass PlanSerializer(serializers.ModelSerializer, CommonFields):\n type = serializers.SerializerMethodField()\n subtype = serializers.SerializerMethodField()\n url = serializers.SerializerMethodField()\n district = serializers.SerializerMethodField()\n point = serializers.SerializerMethodField()\n participation_active = serializers.SerializerMethodField()\n participation_string = serializers.SerializerMethodField()\n published_projects_count = serializers.SerializerMethodField()\n organisation = serializers.SerializerMethodField()\n created_or_modified = serializers.SerializerMethodField()\n\n class Meta:\n model = Plan\n fields = ['type', 'subtype', 'title', 'url',\n 'organisation', 'point',\n 'point_label', 'cost',\n 'district', 'topics', 'status',\n 'participation',\n 'participation_string',\n 'participation_active',\n 'published_projects_count', 'created_or_modified']\n\n def get_subtype(self, instance):\n return 'plan'\n\n def _get_participation_status_plan(self, item):\n projects = item.published_projects\n if not projects:\n return item.get_participation_display(), False\n else:\n status_string = item.participation_string\n if status_string:\n return status_string, True\n else:\n return item.get_participation_display(), False\n\n def get_type(self, instance):\n return 'plan'\n\n def get_url(self, instance):\n return instance.get_absolute_url()\n\n def get_published_projects_count(self, instance):\n return instance.published_projects.count()\n\n def get_participation_string(self, instance):\n participation_string, participation_active = \\\n self._get_participation_status_plan(instance)\n return str(participation_string)\n\n def get_participation_active(self, instance):\n participation_string, participation_active = \\\n self._get_participation_status_plan(instance)\n return participation_active\n\n def get_created_or_modified(self, instance):\n if instance.modified:\n return str(instance.modified)\n return str(instance.created)\n", "path": "meinberlin/apps/plans/serializers.py"}]}
2,912
373
gh_patches_debug_20852
rasdani/github-patches
git_diff
Lightning-AI__torchmetrics-1729
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PearsonCorrCoeff returns nan when input is of type torch.float16 or torch.bfloat16. ## 🐛 Bug PearsonCorrCoeff returns nan when input is of type torch.float16 or torch.bfloat16 and all values are close. ### To Reproduce <!-- If you have a code sample, error messages, stack traces, please provide it here as well --> <details> <summary> ```python import torch import torchmetrics as tm pcc = tm.regression.PearsonCorrCoef().to("cuda") pred = torch.tensor([0.4746, 0.4805, 0.4766, 0.4805, 0.4766, 0.4805, 0.4785, 0.4824, 0.4805],dtype=torch.float16).to("cuda") target = torch.tensor([0.0336, 0.3676, 0.6302, 0.7192, 0.2295, 0.2886, 0.6302, 0.7096, 0.0208],dtype=torch.float16).to("cuda") print(pcc(pred,target)) print(pcc(pred.to(torch.float32),target.to(torch.float32))) tensor(nan, device='cuda:0') tensor(0.3720, device='cuda:0') ``` </summary> <!-- Ideally attach a minimal code sample to reproduce the decried issue. Minimal means having the shortest code but still preserving the bug. --> </details> ### Expected behavior <!-- A clear and concise description of what you expected to happen. --> ### Environment Python version: 3.10.9 Torch version: 1.12.1 TorchMetrics version: 0.11.1 GPU device name: Tesla T4 CUDA Version: 11.4 ### Additional context When running in a training loop I found that some fraction (~30%) of steps would not produce a nan number when using torch.float16 or bfloat16, while the other ~70% would. This seems to occur because the values in pred above are not very different (changing one value of pred above to be more different than the rest will compute a correct PCC), however I think that this should still be able to be computed with half precision and the standard deviation of pred shown above. <!-- Add any other context about the problem here. --> </issue> <code> [start of src/torchmetrics/functional/regression/pearson.py] 1 # Copyright The Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 from typing import Tuple 15 16 import torch 17 from torch import Tensor 18 19 from torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs 20 from torchmetrics.utilities.checks import _check_same_shape 21 22 23 def _pearson_corrcoef_update( 24 preds: Tensor, 25 target: Tensor, 26 mean_x: Tensor, 27 mean_y: Tensor, 28 var_x: Tensor, 29 var_y: Tensor, 30 corr_xy: Tensor, 31 n_prior: Tensor, 32 num_outputs: int, 33 ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: 34 """Update and returns variables required to compute Pearson Correlation Coefficient. 35 36 Check for same shape of input tensors. 
37 38 Args: 39 preds: estimated scores 40 target: ground truth scores 41 mean_x: current mean estimate of x tensor 42 mean_y: current mean estimate of y tensor 43 var_x: current variance estimate of x tensor 44 var_y: current variance estimate of y tensor 45 corr_xy: current covariance estimate between x and y tensor 46 n_prior: current number of observed observations 47 num_outputs: Number of outputs in multioutput setting 48 """ 49 # Data checking 50 _check_same_shape(preds, target) 51 _check_data_shape_to_num_outputs(preds, target, num_outputs) 52 53 n_obs = preds.shape[0] 54 mx_new = (n_prior * mean_x + preds.mean(0) * n_obs) / (n_prior + n_obs) 55 my_new = (n_prior * mean_y + target.mean(0) * n_obs) / (n_prior + n_obs) 56 n_prior += n_obs 57 var_x += ((preds - mx_new) * (preds - mean_x)).sum(0) 58 var_y += ((target - my_new) * (target - mean_y)).sum(0) 59 corr_xy += ((preds - mx_new) * (target - mean_y)).sum(0) 60 mean_x = mx_new 61 mean_y = my_new 62 63 return mean_x, mean_y, var_x, var_y, corr_xy, n_prior 64 65 66 def _pearson_corrcoef_compute( 67 var_x: Tensor, 68 var_y: Tensor, 69 corr_xy: Tensor, 70 nb: Tensor, 71 ) -> Tensor: 72 """Compute the final pearson correlation based on accumulated statistics. 73 74 Args: 75 var_x: variance estimate of x tensor 76 var_y: variance estimate of y tensor 77 corr_xy: covariance estimate between x and y tensor 78 nb: number of observations 79 """ 80 var_x /= nb - 1 81 var_y /= nb - 1 82 corr_xy /= nb - 1 83 corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze() 84 return torch.clamp(corrcoef, -1.0, 1.0) 85 86 87 def pearson_corrcoef(preds: Tensor, target: Tensor) -> Tensor: 88 """Compute pearson correlation coefficient. 89 90 Args: 91 preds: estimated scores 92 target: ground truth scores 93 94 Example (single output regression): 95 >>> from torchmetrics.functional.regression import pearson_corrcoef 96 >>> target = torch.tensor([3, -0.5, 2, 7]) 97 >>> preds = torch.tensor([2.5, 0.0, 2, 8]) 98 >>> pearson_corrcoef(preds, target) 99 tensor(0.9849) 100 101 Example (multi output regression): 102 >>> from torchmetrics.functional.regression import pearson_corrcoef 103 >>> target = torch.tensor([[3, -0.5], [2, 7]]) 104 >>> preds = torch.tensor([[2.5, 0.0], [2, 8]]) 105 >>> pearson_corrcoef(preds, target) 106 tensor([1., 1.]) 107 """ 108 d = preds.shape[1] if preds.ndim == 2 else 1 109 _temp = torch.zeros(d, dtype=preds.dtype, device=preds.device) 110 mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone() 111 var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone() 112 _, _, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update( 113 preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb, num_outputs=1 if preds.ndim == 1 else preds.shape[-1] 114 ) 115 return _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb) 116 [end of src/torchmetrics/functional/regression/pearson.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/torchmetrics/functional/regression/pearson.py b/src/torchmetrics/functional/regression/pearson.py
--- a/src/torchmetrics/functional/regression/pearson.py
+++ b/src/torchmetrics/functional/regression/pearson.py
@@ -49,13 +49,25 @@
 # Data checking
 _check_same_shape(preds, target)
 _check_data_shape_to_num_outputs(preds, target, num_outputs)
+ cond = n_prior.mean() > 0
 
 n_obs = preds.shape[0]
- mx_new = (n_prior * mean_x + preds.mean(0) * n_obs) / (n_prior + n_obs)
- my_new = (n_prior * mean_y + target.mean(0) * n_obs) / (n_prior + n_obs)
+ if cond:
+ mx_new = (n_prior * mean_x + preds.sum(0)) / (n_prior + n_obs)
+ my_new = (n_prior * mean_y + target.sum(0)) / (n_prior + n_obs)
+ else:
+ mx_new = preds.mean(0)
+ my_new = target.mean(0)
+
 n_prior += n_obs
- var_x += ((preds - mx_new) * (preds - mean_x)).sum(0)
- var_y += ((target - my_new) * (target - mean_y)).sum(0)
+
+ if cond:
+ var_x += ((preds - mx_new) * (preds - mean_x)).sum(0)
+ var_y += ((target - my_new) * (target - mean_y)).sum(0)
+
+ else:
+ var_x += preds.var(0) * (n_obs - 1)
+ var_y += target.var(0) * (n_obs - 1)
 corr_xy += ((preds - mx_new) * (target - mean_y)).sum(0)
 mean_x = mx_new
 mean_y = my_new
{"golden_diff": "diff --git a/src/torchmetrics/functional/regression/pearson.py b/src/torchmetrics/functional/regression/pearson.py\n--- a/src/torchmetrics/functional/regression/pearson.py\n+++ b/src/torchmetrics/functional/regression/pearson.py\n@@ -49,13 +49,25 @@\n # Data checking\n _check_same_shape(preds, target)\n _check_data_shape_to_num_outputs(preds, target, num_outputs)\n+ cond = n_prior.mean() > 0\n \n n_obs = preds.shape[0]\n- mx_new = (n_prior * mean_x + preds.mean(0) * n_obs) / (n_prior + n_obs)\n- my_new = (n_prior * mean_y + target.mean(0) * n_obs) / (n_prior + n_obs)\n+ if cond:\n+ mx_new = (n_prior * mean_x + preds.sum(0)) / (n_prior + n_obs)\n+ my_new = (n_prior * mean_y + target.sum(0)) / (n_prior + n_obs)\n+ else:\n+ mx_new = preds.mean(0)\n+ my_new = target.mean(0)\n+\n n_prior += n_obs\n- var_x += ((preds - mx_new) * (preds - mean_x)).sum(0)\n- var_y += ((target - my_new) * (target - mean_y)).sum(0)\n+\n+ if cond:\n+ var_x += ((preds - mx_new) * (preds - mean_x)).sum(0)\n+ var_y += ((target - my_new) * (target - mean_y)).sum(0)\n+\n+ else:\n+ var_x += preds.var(0) * (n_obs - 1)\n+ var_y += target.var(0) * (n_obs - 1)\n corr_xy += ((preds - mx_new) * (target - mean_y)).sum(0)\n mean_x = mx_new\n mean_y = my_new\n", "issue": "PearsonCorrCoeff returns nan when input is of type torch.float16 or torch.bfloat16.\n## \ud83d\udc1b Bug\r\n\r\nPearsonCorrCoeff returns nan when input is of type torch.float16 or torch.bfloat16 and all values are close.\r\n\r\n### To Reproduce\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n<details>\r\n <summary>\r\n\r\n```python\r\nimport torch\r\nimport torchmetrics as tm\r\npcc = tm.regression.PearsonCorrCoef().to(\"cuda\")\r\npred = torch.tensor([0.4746, 0.4805, 0.4766, 0.4805, 0.4766, 0.4805, 0.4785, 0.4824, 0.4805],dtype=torch.float16).to(\"cuda\")\r\ntarget = torch.tensor([0.0336, 0.3676, 0.6302, 0.7192, 0.2295, 0.2886, 0.6302, 0.7096, 0.0208],dtype=torch.float16).to(\"cuda\")\r\nprint(pcc(pred,target))\r\nprint(pcc(pred.to(torch.float32),target.to(torch.float32)))\r\ntensor(nan, device='cuda:0')\r\ntensor(0.3720, device='cuda:0')\r\n```\r\n\r\n</summary>\r\n\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue.\r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n</details>\r\n\r\n### Expected behavior\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n### Environment\r\n\r\nPython version: 3.10.9\r\nTorch version: 1.12.1\r\nTorchMetrics version: 0.11.1\r\nGPU device name: Tesla T4\r\nCUDA Version: 11.4 \r\n\r\n### Additional context\r\n\r\nWhen running in a training loop I found that some fraction (~30%) of steps would not produce a nan number when using torch.float16 or bfloat16, while the other ~70% would.\r\nThis seems to occur because the values in pred above are not very different (changing one value of pred above to be more different than the rest will compute a correct PCC), however I think that this should still be able to be computed with half precision and the standard deviation of pred shown above.\r\n<!-- Add any other context about the problem here. 
-->\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef _pearson_corrcoef_update(\n preds: Tensor,\n target: Tensor,\n mean_x: Tensor,\n mean_y: Tensor,\n var_x: Tensor,\n var_y: Tensor,\n corr_xy: Tensor,\n n_prior: Tensor,\n num_outputs: int,\n) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Update and returns variables required to compute Pearson Correlation Coefficient.\n\n Check for same shape of input tensors.\n\n Args:\n preds: estimated scores\n target: ground truth scores\n mean_x: current mean estimate of x tensor\n mean_y: current mean estimate of y tensor\n var_x: current variance estimate of x tensor\n var_y: current variance estimate of y tensor\n corr_xy: current covariance estimate between x and y tensor\n n_prior: current number of observed observations\n num_outputs: Number of outputs in multioutput setting\n \"\"\"\n # Data checking\n _check_same_shape(preds, target)\n _check_data_shape_to_num_outputs(preds, target, num_outputs)\n\n n_obs = preds.shape[0]\n mx_new = (n_prior * mean_x + preds.mean(0) * n_obs) / (n_prior + n_obs)\n my_new = (n_prior * mean_y + target.mean(0) * n_obs) / (n_prior + n_obs)\n n_prior += n_obs\n var_x += ((preds - mx_new) * (preds - mean_x)).sum(0)\n var_y += ((target - my_new) * (target - mean_y)).sum(0)\n corr_xy += ((preds - mx_new) * (target - mean_y)).sum(0)\n mean_x = mx_new\n mean_y = my_new\n\n return mean_x, mean_y, var_x, var_y, corr_xy, n_prior\n\n\ndef _pearson_corrcoef_compute(\n var_x: Tensor,\n var_y: Tensor,\n corr_xy: Tensor,\n nb: Tensor,\n) -> Tensor:\n \"\"\"Compute the final pearson correlation based on accumulated statistics.\n\n Args:\n var_x: variance estimate of x tensor\n var_y: variance estimate of y tensor\n corr_xy: covariance estimate between x and y tensor\n nb: number of observations\n \"\"\"\n var_x /= nb - 1\n var_y /= nb - 1\n corr_xy /= nb - 1\n corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()\n return torch.clamp(corrcoef, -1.0, 1.0)\n\n\ndef pearson_corrcoef(preds: Tensor, target: Tensor) -> Tensor:\n \"\"\"Compute pearson correlation coefficient.\n\n Args:\n preds: estimated scores\n target: ground truth scores\n\n Example (single output regression):\n >>> from torchmetrics.functional.regression import pearson_corrcoef\n >>> target = torch.tensor([3, -0.5, 2, 7])\n >>> preds = torch.tensor([2.5, 0.0, 2, 8])\n >>> pearson_corrcoef(preds, target)\n tensor(0.9849)\n\n Example (multi output regression):\n >>> from torchmetrics.functional.regression import pearson_corrcoef\n >>> target = torch.tensor([[3, -0.5], [2, 7]])\n >>> preds = torch.tensor([[2.5, 0.0], [2, 8]])\n >>> pearson_corrcoef(preds, target)\n tensor([1., 1.])\n \"\"\"\n d = preds.shape[1] if preds.ndim == 2 else 1\n _temp = 
torch.zeros(d, dtype=preds.dtype, device=preds.device)\n mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone()\n var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone()\n _, _, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update(\n preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb, num_outputs=1 if preds.ndim == 1 else preds.shape[-1]\n )\n return _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb)\n", "path": "src/torchmetrics/functional/regression/pearson.py"}]}
2,468
446
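The entry above addresses a half-precision failure in the batched Pearson update: with float16/bfloat16 inputs whose values are nearly identical, the mean-based incremental update loses precision and the resulting correlation comes out as NaN. The patch seeds the running means and variances directly from the first batch and only switches to the sum-based incremental update once prior statistics exist. The sketch below restates that update as a standalone function; it mirrors the diff but is illustrative only, and the arguments (running tensors initialised to zeros of shape `(num_outputs,)`) are assumptions rather than the TorchMetrics API.

```python
import torch

def pearson_update(preds, target, mean_x, mean_y, var_x, var_y, corr_xy, n_prior):
    # Running statistics are float tensors; n_prior counts previously seen rows.
    has_prior = n_prior.mean() > 0
    n_obs = preds.shape[0]
    if has_prior:
        # Incremental update based on sums, used once statistics already exist.
        mx_new = (n_prior * mean_x + preds.sum(0)) / (n_prior + n_obs)
        my_new = (n_prior * mean_y + target.sum(0)) / (n_prior + n_obs)
    else:
        # First batch: seed the means directly from the data.
        mx_new, my_new = preds.mean(0), target.mean(0)
    n_prior = n_prior + n_obs
    if has_prior:
        var_x = var_x + ((preds - mx_new) * (preds - mean_x)).sum(0)
        var_y = var_y + ((target - my_new) * (target - mean_y)).sum(0)
    else:
        # torch.var is unbiased, so rescale to an accumulated sum of squares.
        var_x = var_x + preds.var(0) * (n_obs - 1)
        var_y = var_y + target.var(0) * (n_obs - 1)
    corr_xy = corr_xy + ((preds - mx_new) * (target - mean_y)).sum(0)
    return mx_new, my_new, var_x, var_y, corr_xy, n_prior
```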
gh_patches_debug_23415
rasdani/github-patches
git_diff
encode__starlette-563
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use "308 Permanent Redirect" for redirect slashes behavior. Hi, I stumbled upon a quirk in starlette that is not properly documented. It seems like all of my HTTP request to a route without trailing slash are being redirected to route with trailing slashes. Say I am hitting `http://hostname/mountpoint/api` it will be redirected (302) to `http://hostname/mountpoint/api/`. This messed up your api calls; if you call POST to `http://hostname/mountpoint/api` it will be redirected to GET `http://hostname/mountpoint/api/`. I dig on the source and see this redirect_slashes flag and setting it to False fix this. I feel this behavior (the auto redirection) should be documented. </issue> <code> [start of starlette/middleware/httpsredirect.py] 1 from starlette.datastructures import URL 2 from starlette.responses import RedirectResponse 3 from starlette.types import ASGIApp, Receive, Scope, Send 4 5 6 class HTTPSRedirectMiddleware: 7 def __init__(self, app: ASGIApp) -> None: 8 self.app = app 9 10 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: 11 if scope["type"] in ("http", "websocket") and scope["scheme"] in ("http", "ws"): 12 url = URL(scope=scope) 13 redirect_scheme = {"http": "https", "ws": "wss"}[url.scheme] 14 netloc = url.hostname if url.port in (80, 443) else url.netloc 15 url = url.replace(scheme=redirect_scheme, netloc=netloc) 16 response = RedirectResponse(url, status_code=301) 17 await response(scope, receive, send) 18 else: 19 await self.app(scope, receive, send) 20 [end of starlette/middleware/httpsredirect.py] [start of starlette/responses.py] 1 import hashlib 2 import http.cookies 3 import inspect 4 import json 5 import os 6 import stat 7 import typing 8 from email.utils import formatdate 9 from mimetypes import guess_type 10 from urllib.parse import quote_plus 11 12 from starlette.background import BackgroundTask 13 from starlette.concurrency import iterate_in_threadpool 14 from starlette.datastructures import URL, MutableHeaders 15 from starlette.types import Receive, Scope, Send 16 17 try: 18 import aiofiles 19 from aiofiles.os import stat as aio_stat 20 except ImportError: # pragma: nocover 21 aiofiles = None # type: ignore 22 aio_stat = None # type: ignore 23 24 try: 25 import ujson 26 except ImportError: # pragma: nocover 27 ujson = None # type: ignore 28 29 30 class Response: 31 media_type = None 32 charset = "utf-8" 33 34 def __init__( 35 self, 36 content: typing.Any = None, 37 status_code: int = 200, 38 headers: dict = None, 39 media_type: str = None, 40 background: BackgroundTask = None, 41 ) -> None: 42 self.body = self.render(content) 43 self.status_code = status_code 44 if media_type is not None: 45 self.media_type = media_type 46 self.background = background 47 self.init_headers(headers) 48 49 def render(self, content: typing.Any) -> bytes: 50 if content is None: 51 return b"" 52 if isinstance(content, bytes): 53 return content 54 return content.encode(self.charset) 55 56 def init_headers(self, headers: typing.Mapping[str, str] = None) -> None: 57 if headers is None: 58 raw_headers = [] # type: typing.List[typing.Tuple[bytes, bytes]] 59 populate_content_length = True 60 populate_content_type = True 61 else: 62 raw_headers = [ 63 (k.lower().encode("latin-1"), v.encode("latin-1")) 64 for k, v in headers.items() 65 ] 66 keys = [h[0] for h in raw_headers] 67 populate_content_length = b"content-length" not in keys 68 populate_content_type 
= b"content-type" not in keys 69 70 body = getattr(self, "body", b"") 71 if body and populate_content_length: 72 content_length = str(len(body)) 73 raw_headers.append((b"content-length", content_length.encode("latin-1"))) 74 75 content_type = self.media_type 76 if content_type is not None and populate_content_type: 77 if content_type.startswith("text/"): 78 content_type += "; charset=" + self.charset 79 raw_headers.append((b"content-type", content_type.encode("latin-1"))) 80 81 self.raw_headers = raw_headers 82 83 @property 84 def headers(self) -> MutableHeaders: 85 if not hasattr(self, "_headers"): 86 self._headers = MutableHeaders(raw=self.raw_headers) 87 return self._headers 88 89 def set_cookie( 90 self, 91 key: str, 92 value: str = "", 93 max_age: int = None, 94 expires: int = None, 95 path: str = "/", 96 domain: str = None, 97 secure: bool = False, 98 httponly: bool = False, 99 ) -> None: 100 cookie = http.cookies.SimpleCookie() 101 cookie[key] = value 102 if max_age is not None: 103 cookie[key]["max-age"] = max_age # type: ignore 104 if expires is not None: 105 cookie[key]["expires"] = expires # type: ignore 106 if path is not None: 107 cookie[key]["path"] = path 108 if domain is not None: 109 cookie[key]["domain"] = domain 110 if secure: 111 cookie[key]["secure"] = True # type: ignore 112 if httponly: 113 cookie[key]["httponly"] = True # type: ignore 114 cookie_val = cookie.output(header="").strip() 115 self.raw_headers.append((b"set-cookie", cookie_val.encode("latin-1"))) 116 117 def delete_cookie(self, key: str, path: str = "/", domain: str = None) -> None: 118 self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain) 119 120 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: 121 await send( 122 { 123 "type": "http.response.start", 124 "status": self.status_code, 125 "headers": self.raw_headers, 126 } 127 ) 128 await send({"type": "http.response.body", "body": self.body}) 129 130 if self.background is not None: 131 await self.background() 132 133 134 class HTMLResponse(Response): 135 media_type = "text/html" 136 137 138 class PlainTextResponse(Response): 139 media_type = "text/plain" 140 141 142 class JSONResponse(Response): 143 media_type = "application/json" 144 145 def render(self, content: typing.Any) -> bytes: 146 return json.dumps( 147 content, 148 ensure_ascii=False, 149 allow_nan=False, 150 indent=None, 151 separators=(",", ":"), 152 ).encode("utf-8") 153 154 155 class UJSONResponse(JSONResponse): 156 media_type = "application/json" 157 158 def render(self, content: typing.Any) -> bytes: 159 return ujson.dumps(content, ensure_ascii=False).encode("utf-8") 160 161 162 class RedirectResponse(Response): 163 def __init__( 164 self, url: typing.Union[str, URL], status_code: int = 302, headers: dict = None 165 ) -> None: 166 super().__init__(content=b"", status_code=status_code, headers=headers) 167 self.headers["location"] = quote_plus(str(url), safe=":/%#?&=@[]!$&'()*+,;") 168 169 170 class StreamingResponse(Response): 171 def __init__( 172 self, 173 content: typing.Any, 174 status_code: int = 200, 175 headers: dict = None, 176 media_type: str = None, 177 background: BackgroundTask = None, 178 ) -> None: 179 if inspect.isasyncgen(content): 180 self.body_iterator = content 181 else: 182 self.body_iterator = iterate_in_threadpool(content) 183 self.status_code = status_code 184 self.media_type = self.media_type if media_type is None else media_type 185 self.background = background 186 self.init_headers(headers) 187 188 async def 
__call__(self, scope: Scope, receive: Receive, send: Send) -> None: 189 await send( 190 { 191 "type": "http.response.start", 192 "status": self.status_code, 193 "headers": self.raw_headers, 194 } 195 ) 196 async for chunk in self.body_iterator: 197 if not isinstance(chunk, bytes): 198 chunk = chunk.encode(self.charset) 199 await send({"type": "http.response.body", "body": chunk, "more_body": True}) 200 await send({"type": "http.response.body", "body": b"", "more_body": False}) 201 202 if self.background is not None: 203 await self.background() 204 205 206 class FileResponse(Response): 207 chunk_size = 4096 208 209 def __init__( 210 self, 211 path: str, 212 status_code: int = 200, 213 headers: dict = None, 214 media_type: str = None, 215 background: BackgroundTask = None, 216 filename: str = None, 217 stat_result: os.stat_result = None, 218 method: str = None, 219 ) -> None: 220 assert aiofiles is not None, "'aiofiles' must be installed to use FileResponse" 221 self.path = path 222 self.status_code = status_code 223 self.filename = filename 224 self.send_header_only = method is not None and method.upper() == "HEAD" 225 if media_type is None: 226 media_type = guess_type(filename or path)[0] or "text/plain" 227 self.media_type = media_type 228 self.background = background 229 self.init_headers(headers) 230 if self.filename is not None: 231 content_disposition = 'attachment; filename="{}"'.format(self.filename) 232 self.headers.setdefault("content-disposition", content_disposition) 233 self.stat_result = stat_result 234 if stat_result is not None: 235 self.set_stat_headers(stat_result) 236 237 def set_stat_headers(self, stat_result: os.stat_result) -> None: 238 content_length = str(stat_result.st_size) 239 last_modified = formatdate(stat_result.st_mtime, usegmt=True) 240 etag_base = str(stat_result.st_mtime) + "-" + str(stat_result.st_size) 241 etag = hashlib.md5(etag_base.encode()).hexdigest() 242 243 self.headers.setdefault("content-length", content_length) 244 self.headers.setdefault("last-modified", last_modified) 245 self.headers.setdefault("etag", etag) 246 247 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: 248 if self.stat_result is None: 249 try: 250 stat_result = await aio_stat(self.path) 251 self.set_stat_headers(stat_result) 252 except FileNotFoundError: 253 raise RuntimeError(f"File at path {self.path} does not exist.") 254 else: 255 mode = stat_result.st_mode 256 if not stat.S_ISREG(mode): 257 raise RuntimeError(f"File at path {self.path} is not a file.") 258 await send( 259 { 260 "type": "http.response.start", 261 "status": self.status_code, 262 "headers": self.raw_headers, 263 } 264 ) 265 if self.send_header_only: 266 await send({"type": "http.response.body"}) 267 else: 268 async with aiofiles.open(self.path, mode="rb") as file: 269 more_body = True 270 while more_body: 271 chunk = await file.read(self.chunk_size) 272 more_body = len(chunk) == self.chunk_size 273 await send( 274 { 275 "type": "http.response.body", 276 "body": chunk, 277 "more_body": more_body, 278 } 279 ) 280 if self.background is not None: 281 await self.background() 282 [end of starlette/responses.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/starlette/middleware/httpsredirect.py b/starlette/middleware/httpsredirect.py --- a/starlette/middleware/httpsredirect.py +++ b/starlette/middleware/httpsredirect.py @@ -13,7 +13,7 @@ redirect_scheme = {"http": "https", "ws": "wss"}[url.scheme] netloc = url.hostname if url.port in (80, 443) else url.netloc url = url.replace(scheme=redirect_scheme, netloc=netloc) - response = RedirectResponse(url, status_code=301) + response = RedirectResponse(url, status_code=308) await response(scope, receive, send) else: await self.app(scope, receive, send) diff --git a/starlette/responses.py b/starlette/responses.py --- a/starlette/responses.py +++ b/starlette/responses.py @@ -161,7 +161,7 @@ class RedirectResponse(Response): def __init__( - self, url: typing.Union[str, URL], status_code: int = 302, headers: dict = None + self, url: typing.Union[str, URL], status_code: int = 307, headers: dict = None ) -> None: super().__init__(content=b"", status_code=status_code, headers=headers) self.headers["location"] = quote_plus(str(url), safe=":/%#?&=@[]!$&'()*+,;")
{"golden_diff": "diff --git a/starlette/middleware/httpsredirect.py b/starlette/middleware/httpsredirect.py\n--- a/starlette/middleware/httpsredirect.py\n+++ b/starlette/middleware/httpsredirect.py\n@@ -13,7 +13,7 @@\n redirect_scheme = {\"http\": \"https\", \"ws\": \"wss\"}[url.scheme]\n netloc = url.hostname if url.port in (80, 443) else url.netloc\n url = url.replace(scheme=redirect_scheme, netloc=netloc)\n- response = RedirectResponse(url, status_code=301)\n+ response = RedirectResponse(url, status_code=308)\n await response(scope, receive, send)\n else:\n await self.app(scope, receive, send)\ndiff --git a/starlette/responses.py b/starlette/responses.py\n--- a/starlette/responses.py\n+++ b/starlette/responses.py\n@@ -161,7 +161,7 @@\n \n class RedirectResponse(Response):\n def __init__(\n- self, url: typing.Union[str, URL], status_code: int = 302, headers: dict = None\n+ self, url: typing.Union[str, URL], status_code: int = 307, headers: dict = None\n ) -> None:\n super().__init__(content=b\"\", status_code=status_code, headers=headers)\n self.headers[\"location\"] = quote_plus(str(url), safe=\":/%#?&=@[]!$&'()*+,;\")\n", "issue": "Use \"308 Permanent Redirect\" for redirect slashes behavior.\nHi,\r\nI stumbled upon a quirk in starlette that is not properly documented. It seems like all of my HTTP request to a route without trailing slash are being redirected to route with trailing slashes. Say I am hitting `http://hostname/mountpoint/api` it will be redirected (302) to `http://hostname/mountpoint/api/`. This messed up your api calls; if you call POST to `http://hostname/mountpoint/api` it will be redirected to GET `http://hostname/mountpoint/api/`.\r\n\r\nI dig on the source and see this redirect_slashes flag and setting it to False fix this. I feel this behavior (the auto redirection) should be documented.\n", "before_files": [{"content": "from starlette.datastructures import URL\nfrom starlette.responses import RedirectResponse\nfrom starlette.types import ASGIApp, Receive, Scope, Send\n\n\nclass HTTPSRedirectMiddleware:\n def __init__(self, app: ASGIApp) -> None:\n self.app = app\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] in (\"http\", \"websocket\") and scope[\"scheme\"] in (\"http\", \"ws\"):\n url = URL(scope=scope)\n redirect_scheme = {\"http\": \"https\", \"ws\": \"wss\"}[url.scheme]\n netloc = url.hostname if url.port in (80, 443) else url.netloc\n url = url.replace(scheme=redirect_scheme, netloc=netloc)\n response = RedirectResponse(url, status_code=301)\n await response(scope, receive, send)\n else:\n await self.app(scope, receive, send)\n", "path": "starlette/middleware/httpsredirect.py"}, {"content": "import hashlib\nimport http.cookies\nimport inspect\nimport json\nimport os\nimport stat\nimport typing\nfrom email.utils import formatdate\nfrom mimetypes import guess_type\nfrom urllib.parse import quote_plus\n\nfrom starlette.background import BackgroundTask\nfrom starlette.concurrency import iterate_in_threadpool\nfrom starlette.datastructures import URL, MutableHeaders\nfrom starlette.types import Receive, Scope, Send\n\ntry:\n import aiofiles\n from aiofiles.os import stat as aio_stat\nexcept ImportError: # pragma: nocover\n aiofiles = None # type: ignore\n aio_stat = None # type: ignore\n\ntry:\n import ujson\nexcept ImportError: # pragma: nocover\n ujson = None # type: ignore\n\n\nclass Response:\n media_type = None\n charset = \"utf-8\"\n\n def __init__(\n self,\n content: typing.Any = None,\n status_code: 
int = 200,\n headers: dict = None,\n media_type: str = None,\n background: BackgroundTask = None,\n ) -> None:\n self.body = self.render(content)\n self.status_code = status_code\n if media_type is not None:\n self.media_type = media_type\n self.background = background\n self.init_headers(headers)\n\n def render(self, content: typing.Any) -> bytes:\n if content is None:\n return b\"\"\n if isinstance(content, bytes):\n return content\n return content.encode(self.charset)\n\n def init_headers(self, headers: typing.Mapping[str, str] = None) -> None:\n if headers is None:\n raw_headers = [] # type: typing.List[typing.Tuple[bytes, bytes]]\n populate_content_length = True\n populate_content_type = True\n else:\n raw_headers = [\n (k.lower().encode(\"latin-1\"), v.encode(\"latin-1\"))\n for k, v in headers.items()\n ]\n keys = [h[0] for h in raw_headers]\n populate_content_length = b\"content-length\" not in keys\n populate_content_type = b\"content-type\" not in keys\n\n body = getattr(self, \"body\", b\"\")\n if body and populate_content_length:\n content_length = str(len(body))\n raw_headers.append((b\"content-length\", content_length.encode(\"latin-1\")))\n\n content_type = self.media_type\n if content_type is not None and populate_content_type:\n if content_type.startswith(\"text/\"):\n content_type += \"; charset=\" + self.charset\n raw_headers.append((b\"content-type\", content_type.encode(\"latin-1\")))\n\n self.raw_headers = raw_headers\n\n @property\n def headers(self) -> MutableHeaders:\n if not hasattr(self, \"_headers\"):\n self._headers = MutableHeaders(raw=self.raw_headers)\n return self._headers\n\n def set_cookie(\n self,\n key: str,\n value: str = \"\",\n max_age: int = None,\n expires: int = None,\n path: str = \"/\",\n domain: str = None,\n secure: bool = False,\n httponly: bool = False,\n ) -> None:\n cookie = http.cookies.SimpleCookie()\n cookie[key] = value\n if max_age is not None:\n cookie[key][\"max-age\"] = max_age # type: ignore\n if expires is not None:\n cookie[key][\"expires\"] = expires # type: ignore\n if path is not None:\n cookie[key][\"path\"] = path\n if domain is not None:\n cookie[key][\"domain\"] = domain\n if secure:\n cookie[key][\"secure\"] = True # type: ignore\n if httponly:\n cookie[key][\"httponly\"] = True # type: ignore\n cookie_val = cookie.output(header=\"\").strip()\n self.raw_headers.append((b\"set-cookie\", cookie_val.encode(\"latin-1\")))\n\n def delete_cookie(self, key: str, path: str = \"/\", domain: str = None) -> None:\n self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain)\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": self.status_code,\n \"headers\": self.raw_headers,\n }\n )\n await send({\"type\": \"http.response.body\", \"body\": self.body})\n\n if self.background is not None:\n await self.background()\n\n\nclass HTMLResponse(Response):\n media_type = \"text/html\"\n\n\nclass PlainTextResponse(Response):\n media_type = \"text/plain\"\n\n\nclass JSONResponse(Response):\n media_type = \"application/json\"\n\n def render(self, content: typing.Any) -> bytes:\n return json.dumps(\n content,\n ensure_ascii=False,\n allow_nan=False,\n indent=None,\n separators=(\",\", \":\"),\n ).encode(\"utf-8\")\n\n\nclass UJSONResponse(JSONResponse):\n media_type = \"application/json\"\n\n def render(self, content: typing.Any) -> bytes:\n return ujson.dumps(content, ensure_ascii=False).encode(\"utf-8\")\n\n\nclass 
RedirectResponse(Response):\n def __init__(\n self, url: typing.Union[str, URL], status_code: int = 302, headers: dict = None\n ) -> None:\n super().__init__(content=b\"\", status_code=status_code, headers=headers)\n self.headers[\"location\"] = quote_plus(str(url), safe=\":/%#?&=@[]!$&'()*+,;\")\n\n\nclass StreamingResponse(Response):\n def __init__(\n self,\n content: typing.Any,\n status_code: int = 200,\n headers: dict = None,\n media_type: str = None,\n background: BackgroundTask = None,\n ) -> None:\n if inspect.isasyncgen(content):\n self.body_iterator = content\n else:\n self.body_iterator = iterate_in_threadpool(content)\n self.status_code = status_code\n self.media_type = self.media_type if media_type is None else media_type\n self.background = background\n self.init_headers(headers)\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": self.status_code,\n \"headers\": self.raw_headers,\n }\n )\n async for chunk in self.body_iterator:\n if not isinstance(chunk, bytes):\n chunk = chunk.encode(self.charset)\n await send({\"type\": \"http.response.body\", \"body\": chunk, \"more_body\": True})\n await send({\"type\": \"http.response.body\", \"body\": b\"\", \"more_body\": False})\n\n if self.background is not None:\n await self.background()\n\n\nclass FileResponse(Response):\n chunk_size = 4096\n\n def __init__(\n self,\n path: str,\n status_code: int = 200,\n headers: dict = None,\n media_type: str = None,\n background: BackgroundTask = None,\n filename: str = None,\n stat_result: os.stat_result = None,\n method: str = None,\n ) -> None:\n assert aiofiles is not None, \"'aiofiles' must be installed to use FileResponse\"\n self.path = path\n self.status_code = status_code\n self.filename = filename\n self.send_header_only = method is not None and method.upper() == \"HEAD\"\n if media_type is None:\n media_type = guess_type(filename or path)[0] or \"text/plain\"\n self.media_type = media_type\n self.background = background\n self.init_headers(headers)\n if self.filename is not None:\n content_disposition = 'attachment; filename=\"{}\"'.format(self.filename)\n self.headers.setdefault(\"content-disposition\", content_disposition)\n self.stat_result = stat_result\n if stat_result is not None:\n self.set_stat_headers(stat_result)\n\n def set_stat_headers(self, stat_result: os.stat_result) -> None:\n content_length = str(stat_result.st_size)\n last_modified = formatdate(stat_result.st_mtime, usegmt=True)\n etag_base = str(stat_result.st_mtime) + \"-\" + str(stat_result.st_size)\n etag = hashlib.md5(etag_base.encode()).hexdigest()\n\n self.headers.setdefault(\"content-length\", content_length)\n self.headers.setdefault(\"last-modified\", last_modified)\n self.headers.setdefault(\"etag\", etag)\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if self.stat_result is None:\n try:\n stat_result = await aio_stat(self.path)\n self.set_stat_headers(stat_result)\n except FileNotFoundError:\n raise RuntimeError(f\"File at path {self.path} does not exist.\")\n else:\n mode = stat_result.st_mode\n if not stat.S_ISREG(mode):\n raise RuntimeError(f\"File at path {self.path} is not a file.\")\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": self.status_code,\n \"headers\": self.raw_headers,\n }\n )\n if self.send_header_only:\n await send({\"type\": \"http.response.body\"})\n else:\n async with aiofiles.open(self.path, mode=\"rb\") as file:\n more_body = 
True\n while more_body:\n chunk = await file.read(self.chunk_size)\n more_body = len(chunk) == self.chunk_size\n await send(\n {\n \"type\": \"http.response.body\",\n \"body\": chunk,\n \"more_body\": more_body,\n }\n )\n if self.background is not None:\n await self.background()\n", "path": "starlette/responses.py"}]}
3,822
334
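The Starlette entry above swaps the redirect status codes so that the request method and body survive the redirect: `RedirectResponse` defaults to 307 and the HTTPS middleware issues 308, while 301/302 responses are commonly replayed by clients as GET. A minimal application sketch in the same spirit is shown below; the route paths and handler names are invented for illustration and are not part of the patch.

```python
from starlette.applications import Starlette
from starlette.responses import JSONResponse, RedirectResponse
from starlette.routing import Route

async def legacy_items(request):
    # 308 Permanent Redirect: clients repeat the same verb, so POST stays POST.
    return RedirectResponse(url="/api/items/", status_code=308)

async def items(request):
    return JSONResponse({"method": request.method})

app = Starlette(routes=[
    Route("/old/items/", legacy_items, methods=["GET", "POST"]),
    Route("/api/items/", items, methods=["GET", "POST"]),
])
```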
gh_patches_debug_30926
rasdani/github-patches
git_diff
conda__conda-5820
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> conda 4.4 incorrect activate message I updated to conda 4.4.0b2 from conda v4.3. I created a new environment and it recommended the following - which I ran and got an error: To activate this environment, use $ conda activate anaconda2 To deactivate an active environment, use $ conda deactivate 0288-csoja:scripts csoja$ conda activate anaconda2 Traceback (most recent call last): File "/Users/csoja/Desktop/anaconda/lib/python3.5/site-packages/conda/cli/main.py", line 176, in main raise CommandNotFoundError(argv1) conda.exceptions.CommandNotFoundError: 'activate is not a conda command. Did you mean 'source activate'? During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/Users/csoja/Desktop/anaconda/bin/conda", line 6, in <module> sys.exit(conda.cli.main()) File "/Users/csoja/Desktop/anaconda/lib/python3.5/site-packages/conda/cli/main.py", line 178, in main from ..exceptions import handle_exception ImportError: cannot import name 'handle_exception' </issue> <code> [start of conda/cli/install.py] 1 # (c) Continuum Analytics, Inc. / http://continuum.io 2 # All Rights Reserved 3 # 4 # conda is distributed under the terms of the BSD 3-clause license. 5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause. 6 7 from __future__ import absolute_import, division, print_function, unicode_literals 8 9 from logging import getLogger 10 import os 11 from os.path import abspath, basename, exists, isdir 12 13 from conda.models.match_spec import MatchSpec 14 from . import common 15 from .common import check_non_admin 16 from .._vendor.auxlib.ish import dals 17 from ..base.constants import ROOT_ENV_NAME 18 from ..base.context import context 19 from ..common.compat import text_type 20 from ..core.envs_manager import EnvsDirectory 21 from ..core.index import calculate_channel_urls, get_index 22 from ..core.solve import Solver 23 from ..exceptions import (CondaImportError, CondaOSError, CondaSystemExit, CondaValueError, 24 DirectoryNotFoundError, DryRunExit, EnvironmentLocationNotFound, 25 PackagesNotFoundError, TooManyArgumentsError, 26 UnsatisfiableError) 27 from ..misc import append_env, clone_env, explicit, touch_nonadmin 28 from ..plan import (revert_actions) 29 from ..resolve import ResolvePackageNotFound 30 31 log = getLogger(__name__) 32 stderrlog = getLogger('conda.stderr') 33 34 35 def check_prefix(prefix, json=False): 36 name = basename(prefix) 37 error = None 38 if name == ROOT_ENV_NAME: 39 error = "'%s' is a reserved environment name" % name 40 if exists(prefix): 41 if isdir(prefix) and 'conda-meta' not in os.listdir(prefix): 42 return None 43 error = "prefix already exists: %s" % prefix 44 45 if error: 46 raise CondaValueError(error, json) 47 48 if ' ' in prefix: 49 stderrlog.warn("WARNING: A space was detected in your requested environment path\n" 50 "'%s'\n" 51 "Spaces in paths can sometimes be problematic." 
% prefix) 52 53 54 def clone(src_arg, dst_prefix, json=False, quiet=False, index_args=None): 55 if os.sep in src_arg: 56 src_prefix = abspath(src_arg) 57 if not isdir(src_prefix): 58 raise DirectoryNotFoundError(src_arg) 59 else: 60 assert context._argparse_args.clone is not None 61 src_prefix = EnvsDirectory.locate_prefix_by_name(context._argparse_args.clone) 62 63 if not json: 64 print("Source: %s" % src_prefix) 65 print("Destination: %s" % dst_prefix) 66 67 actions, untracked_files = clone_env(src_prefix, dst_prefix, 68 verbose=not json, 69 quiet=quiet, 70 index_args=index_args) 71 72 if json: 73 common.stdout_json_success( 74 actions=actions, 75 untracked_files=list(untracked_files), 76 src_prefix=src_prefix, 77 dst_prefix=dst_prefix 78 ) 79 80 81 def print_activate(env_name_or_prefix): # pragma: no cover 82 if not context.quiet and not context.json: 83 message = dals(""" 84 85 To activate this environment, use 86 87 $ conda activate %s 88 89 To deactivate an active environment, use 90 91 $ conda deactivate 92 """) % env_name_or_prefix 93 print(message) # TODO: use logger 94 95 96 def get_revision(arg, json=False): 97 try: 98 return int(arg) 99 except ValueError: 100 CondaValueError("expected revision number, not: '%s'" % arg, json) 101 102 103 def install(args, parser, command='install'): 104 """ 105 conda install, conda update, and conda create 106 """ 107 context.validate_configuration() 108 check_non_admin() 109 110 newenv = bool(command == 'create') 111 isupdate = bool(command == 'update') 112 isinstall = bool(command == 'install') 113 if newenv: 114 common.ensure_name_or_prefix(args, command) 115 prefix = context.target_prefix 116 if newenv: 117 check_prefix(prefix, json=context.json) 118 if context.force_32bit and prefix == context.root_prefix: 119 raise CondaValueError("cannot use CONDA_FORCE_32BIT=1 in root env") 120 if isupdate and not (args.file or args.all or args.packages): 121 raise CondaValueError("""no package names supplied 122 # If you want to update to a newer version of Anaconda, type: 123 # 124 # $ conda update --prefix %s anaconda 125 """ % prefix) 126 127 args_packages = [s.strip('"\'') for s in args.packages] 128 if newenv and not args.no_default_packages: 129 # Override defaults if they are specified at the command line 130 # TODO: rework in 4.4 branch using MatchSpec 131 args_packages_names = [pkg.replace(' ', '=').split('=', 1)[0] for pkg in args_packages] 132 for default_pkg in context.create_default_packages: 133 default_pkg_name = default_pkg.replace(' ', '=').split('=', 1)[0] 134 if default_pkg_name not in args_packages_names: 135 args_packages.append(default_pkg) 136 args_packages.extend(text_type(MatchSpec(provides_features=ft)) for ft in args.features or ()) 137 138 index_args = { 139 'use_cache': args.use_index_cache, 140 'channel_urls': context.channels, 141 'unknown': args.unknown, 142 'prepend': not args.override_channels, 143 'use_local': args.use_local 144 } 145 146 num_cp = sum(s.endswith('.tar.bz2') for s in args_packages) 147 if num_cp: 148 if num_cp == len(args_packages): 149 explicit(args_packages, prefix, verbose=not context.quiet) 150 return 151 else: 152 raise CondaValueError("cannot mix specifications with conda package" 153 " filenames") 154 155 specs = [] 156 if args.file: 157 for fpath in args.file: 158 specs.extend(common.specs_from_url(fpath, json=context.json)) 159 if '@EXPLICIT' in specs: 160 explicit(specs, prefix, verbose=not context.quiet, index_args=index_args) 161 return 162 specs.extend(common.specs_from_args(args_packages, 
json=context.json)) 163 164 if isinstall and args.revision: 165 get_revision(args.revision, json=context.json) 166 elif isinstall and not (args.file or args_packages): 167 raise CondaValueError("too few arguments, " 168 "must supply command line package specs or --file") 169 170 if newenv and args.clone: 171 if args.packages: 172 raise TooManyArgumentsError(0, len(args.packages), list(args.packages), 173 'did not expect any arguments for --clone') 174 175 clone(args.clone, prefix, json=context.json, quiet=context.quiet, index_args=index_args) 176 append_env(prefix) 177 touch_nonadmin(prefix) 178 print_activate(args.name if args.name else prefix) 179 return 180 181 if not isdir(prefix) and not newenv: 182 if args.mkdir: 183 try: 184 os.makedirs(prefix) 185 except OSError: 186 raise CondaOSError("Error: could not create directory: %s" % prefix) 187 else: 188 raise EnvironmentLocationNotFound(prefix) 189 190 try: 191 if isinstall and args.revision: 192 index = get_index(channel_urls=index_args['channel_urls'], 193 prepend=index_args['prepend'], platform=None, 194 use_local=index_args['use_local'], use_cache=index_args['use_cache'], 195 unknown=index_args['unknown'], prefix=prefix) 196 unlink_link_transaction = revert_actions(prefix, get_revision(args.revision), index) 197 progressive_fetch_extract = unlink_link_transaction.get_pfe() 198 else: 199 solver = Solver(prefix, context.channels, context.subdirs, specs_to_add=specs) 200 unlink_link_transaction = solver.solve_for_transaction( 201 force_reinstall=context.force, 202 ) 203 progressive_fetch_extract = unlink_link_transaction.get_pfe() 204 205 except ResolvePackageNotFound as e: 206 channels_urls = tuple(calculate_channel_urls( 207 channel_urls=index_args['channel_urls'], 208 prepend=index_args['prepend'], 209 platform=None, 210 use_local=index_args['use_local'], 211 )) 212 raise PackagesNotFoundError(e.bad_deps, channels_urls) 213 214 except (UnsatisfiableError, SystemExit) as e: 215 # Unsatisfiable package specifications/no such revision/import error 216 if e.args and 'could not import' in e.args[0]: 217 raise CondaImportError(text_type(e)) 218 raise 219 220 handle_txn(progressive_fetch_extract, unlink_link_transaction, prefix, args, newenv) 221 222 223 def handle_txn(progressive_fetch_extract, unlink_link_transaction, prefix, args, newenv, 224 remove_op=False): 225 if unlink_link_transaction.nothing_to_do: 226 if remove_op: 227 # No packages found to remove from environment 228 raise PackagesNotFoundError(args.package_names) 229 elif not newenv: 230 if context.json: 231 common.stdout_json_success(message='All requested packages already installed.') 232 else: 233 print('\n# All requested packages already installed.\n') 234 return 235 236 if not context.json: 237 unlink_link_transaction.display_actions(progressive_fetch_extract) 238 common.confirm_yn() 239 240 elif context.dry_run: 241 common.stdout_json_success(unlink_link_transaction=unlink_link_transaction, prefix=prefix, 242 dry_run=True) 243 raise DryRunExit() 244 245 try: 246 progressive_fetch_extract.execute() 247 unlink_link_transaction.execute() 248 249 except SystemExit as e: 250 raise CondaSystemExit('Exiting', e) 251 252 if newenv: 253 append_env(prefix) 254 touch_nonadmin(prefix) 255 print_activate(args.name if args.name else prefix) 256 257 if context.json: 258 actions = unlink_link_transaction.make_legacy_action_groups(progressive_fetch_extract)[0] 259 common.stdout_json_success(actions=actions) 260 [end of conda/cli/install.py] </code> I need you to solve this issue by 
generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conda/cli/install.py b/conda/cli/install.py --- a/conda/cli/install.py +++ b/conda/cli/install.py @@ -16,7 +16,7 @@ from .._vendor.auxlib.ish import dals from ..base.constants import ROOT_ENV_NAME from ..base.context import context -from ..common.compat import text_type +from ..common.compat import text_type, on_win from ..core.envs_manager import EnvsDirectory from ..core.index import calculate_channel_urls, get_index from ..core.solve import Solver @@ -80,16 +80,39 @@ def print_activate(env_name_or_prefix): # pragma: no cover if not context.quiet and not context.json: - message = dals(""" - - To activate this environment, use - - $ conda activate %s - - To deactivate an active environment, use - - $ conda deactivate - """) % env_name_or_prefix + if 'CONDA_SHLVL' in os.environ or os.path.split(os.environ.get('SHELL', ''))[-1] == 'fish': + message = dals(""" + # + # To activate this environment, use + # + # $ conda activate %s + # + # To deactivate an active environment, use + # + # $ conda deactivate + """) % env_name_or_prefix + elif on_win: + message = dals(""" + # + # To activate this environment, use: + # > activate %s + # + # To deactivate an active environment, use: + # > deactivate + # + # * for power-users using bash, you must source + # + """) % env_name_or_prefix + else: + message = dals(""" + # + # To activate this environment, use: + # > source activate %s + # + # To deactivate an active environment, use: + # > source deactivate + # + """) % env_name_or_prefix print(message) # TODO: use logger
{"golden_diff": "diff --git a/conda/cli/install.py b/conda/cli/install.py\n--- a/conda/cli/install.py\n+++ b/conda/cli/install.py\n@@ -16,7 +16,7 @@\n from .._vendor.auxlib.ish import dals\n from ..base.constants import ROOT_ENV_NAME\n from ..base.context import context\n-from ..common.compat import text_type\n+from ..common.compat import text_type, on_win\n from ..core.envs_manager import EnvsDirectory\n from ..core.index import calculate_channel_urls, get_index\n from ..core.solve import Solver\n@@ -80,16 +80,39 @@\n \n def print_activate(env_name_or_prefix): # pragma: no cover\n if not context.quiet and not context.json:\n- message = dals(\"\"\"\n-\n- To activate this environment, use\n-\n- $ conda activate %s\n-\n- To deactivate an active environment, use\n-\n- $ conda deactivate\n- \"\"\") % env_name_or_prefix\n+ if 'CONDA_SHLVL' in os.environ or os.path.split(os.environ.get('SHELL', ''))[-1] == 'fish':\n+ message = dals(\"\"\"\n+ #\n+ # To activate this environment, use\n+ #\n+ # $ conda activate %s\n+ #\n+ # To deactivate an active environment, use\n+ #\n+ # $ conda deactivate\n+ \"\"\") % env_name_or_prefix\n+ elif on_win:\n+ message = dals(\"\"\"\n+ #\n+ # To activate this environment, use:\n+ # > activate %s\n+ #\n+ # To deactivate an active environment, use:\n+ # > deactivate\n+ #\n+ # * for power-users using bash, you must source\n+ #\n+ \"\"\") % env_name_or_prefix\n+ else:\n+ message = dals(\"\"\"\n+ #\n+ # To activate this environment, use:\n+ # > source activate %s\n+ #\n+ # To deactivate an active environment, use:\n+ # > source deactivate\n+ #\n+ \"\"\") % env_name_or_prefix\n print(message) # TODO: use logger\n", "issue": "conda 4.4 incorrect activate message\nI updated to conda 4.4.0b2 from conda v4.3. I created a new environment and it recommended the following - which I ran and got an error:\r\n\r\nTo activate this environment, use\r\n\r\n $ conda activate anaconda2\r\n\r\nTo deactivate an active environment, use\r\n\r\n $ conda deactivate\r\n\r\n0288-csoja:scripts csoja$ conda activate anaconda2\r\nTraceback (most recent call last):\r\n File \"/Users/csoja/Desktop/anaconda/lib/python3.5/site-packages/conda/cli/main.py\", line 176, in main\r\n raise CommandNotFoundError(argv1)\r\nconda.exceptions.CommandNotFoundError: 'activate is not a conda command.\r\nDid you mean 'source activate'?\r\n\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/Users/csoja/Desktop/anaconda/bin/conda\", line 6, in <module>\r\n sys.exit(conda.cli.main())\r\n File \"/Users/csoja/Desktop/anaconda/lib/python3.5/site-packages/conda/cli/main.py\", line 178, in main\r\n from ..exceptions import handle_exception\r\nImportError: cannot import name 'handle_exception'\n", "before_files": [{"content": "# (c) Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom logging import getLogger\nimport os\nfrom os.path import abspath, basename, exists, isdir\n\nfrom conda.models.match_spec import MatchSpec\nfrom . 
import common\nfrom .common import check_non_admin\nfrom .._vendor.auxlib.ish import dals\nfrom ..base.constants import ROOT_ENV_NAME\nfrom ..base.context import context\nfrom ..common.compat import text_type\nfrom ..core.envs_manager import EnvsDirectory\nfrom ..core.index import calculate_channel_urls, get_index\nfrom ..core.solve import Solver\nfrom ..exceptions import (CondaImportError, CondaOSError, CondaSystemExit, CondaValueError,\n DirectoryNotFoundError, DryRunExit, EnvironmentLocationNotFound,\n PackagesNotFoundError, TooManyArgumentsError,\n UnsatisfiableError)\nfrom ..misc import append_env, clone_env, explicit, touch_nonadmin\nfrom ..plan import (revert_actions)\nfrom ..resolve import ResolvePackageNotFound\n\nlog = getLogger(__name__)\nstderrlog = getLogger('conda.stderr')\n\n\ndef check_prefix(prefix, json=False):\n name = basename(prefix)\n error = None\n if name == ROOT_ENV_NAME:\n error = \"'%s' is a reserved environment name\" % name\n if exists(prefix):\n if isdir(prefix) and 'conda-meta' not in os.listdir(prefix):\n return None\n error = \"prefix already exists: %s\" % prefix\n\n if error:\n raise CondaValueError(error, json)\n\n if ' ' in prefix:\n stderrlog.warn(\"WARNING: A space was detected in your requested environment path\\n\"\n \"'%s'\\n\"\n \"Spaces in paths can sometimes be problematic.\" % prefix)\n\n\ndef clone(src_arg, dst_prefix, json=False, quiet=False, index_args=None):\n if os.sep in src_arg:\n src_prefix = abspath(src_arg)\n if not isdir(src_prefix):\n raise DirectoryNotFoundError(src_arg)\n else:\n assert context._argparse_args.clone is not None\n src_prefix = EnvsDirectory.locate_prefix_by_name(context._argparse_args.clone)\n\n if not json:\n print(\"Source: %s\" % src_prefix)\n print(\"Destination: %s\" % dst_prefix)\n\n actions, untracked_files = clone_env(src_prefix, dst_prefix,\n verbose=not json,\n quiet=quiet,\n index_args=index_args)\n\n if json:\n common.stdout_json_success(\n actions=actions,\n untracked_files=list(untracked_files),\n src_prefix=src_prefix,\n dst_prefix=dst_prefix\n )\n\n\ndef print_activate(env_name_or_prefix): # pragma: no cover\n if not context.quiet and not context.json:\n message = dals(\"\"\"\n\n To activate this environment, use\n\n $ conda activate %s\n\n To deactivate an active environment, use\n\n $ conda deactivate\n \"\"\") % env_name_or_prefix\n print(message) # TODO: use logger\n\n\ndef get_revision(arg, json=False):\n try:\n return int(arg)\n except ValueError:\n CondaValueError(\"expected revision number, not: '%s'\" % arg, json)\n\n\ndef install(args, parser, command='install'):\n \"\"\"\n conda install, conda update, and conda create\n \"\"\"\n context.validate_configuration()\n check_non_admin()\n\n newenv = bool(command == 'create')\n isupdate = bool(command == 'update')\n isinstall = bool(command == 'install')\n if newenv:\n common.ensure_name_or_prefix(args, command)\n prefix = context.target_prefix\n if newenv:\n check_prefix(prefix, json=context.json)\n if context.force_32bit and prefix == context.root_prefix:\n raise CondaValueError(\"cannot use CONDA_FORCE_32BIT=1 in root env\")\n if isupdate and not (args.file or args.all or args.packages):\n raise CondaValueError(\"\"\"no package names supplied\n# If you want to update to a newer version of Anaconda, type:\n#\n# $ conda update --prefix %s anaconda\n\"\"\" % prefix)\n\n args_packages = [s.strip('\"\\'') for s in args.packages]\n if newenv and not args.no_default_packages:\n # Override defaults if they are specified at the command line\n # TODO: 
rework in 4.4 branch using MatchSpec\n args_packages_names = [pkg.replace(' ', '=').split('=', 1)[0] for pkg in args_packages]\n for default_pkg in context.create_default_packages:\n default_pkg_name = default_pkg.replace(' ', '=').split('=', 1)[0]\n if default_pkg_name not in args_packages_names:\n args_packages.append(default_pkg)\n args_packages.extend(text_type(MatchSpec(provides_features=ft)) for ft in args.features or ())\n\n index_args = {\n 'use_cache': args.use_index_cache,\n 'channel_urls': context.channels,\n 'unknown': args.unknown,\n 'prepend': not args.override_channels,\n 'use_local': args.use_local\n }\n\n num_cp = sum(s.endswith('.tar.bz2') for s in args_packages)\n if num_cp:\n if num_cp == len(args_packages):\n explicit(args_packages, prefix, verbose=not context.quiet)\n return\n else:\n raise CondaValueError(\"cannot mix specifications with conda package\"\n \" filenames\")\n\n specs = []\n if args.file:\n for fpath in args.file:\n specs.extend(common.specs_from_url(fpath, json=context.json))\n if '@EXPLICIT' in specs:\n explicit(specs, prefix, verbose=not context.quiet, index_args=index_args)\n return\n specs.extend(common.specs_from_args(args_packages, json=context.json))\n\n if isinstall and args.revision:\n get_revision(args.revision, json=context.json)\n elif isinstall and not (args.file or args_packages):\n raise CondaValueError(\"too few arguments, \"\n \"must supply command line package specs or --file\")\n\n if newenv and args.clone:\n if args.packages:\n raise TooManyArgumentsError(0, len(args.packages), list(args.packages),\n 'did not expect any arguments for --clone')\n\n clone(args.clone, prefix, json=context.json, quiet=context.quiet, index_args=index_args)\n append_env(prefix)\n touch_nonadmin(prefix)\n print_activate(args.name if args.name else prefix)\n return\n\n if not isdir(prefix) and not newenv:\n if args.mkdir:\n try:\n os.makedirs(prefix)\n except OSError:\n raise CondaOSError(\"Error: could not create directory: %s\" % prefix)\n else:\n raise EnvironmentLocationNotFound(prefix)\n\n try:\n if isinstall and args.revision:\n index = get_index(channel_urls=index_args['channel_urls'],\n prepend=index_args['prepend'], platform=None,\n use_local=index_args['use_local'], use_cache=index_args['use_cache'],\n unknown=index_args['unknown'], prefix=prefix)\n unlink_link_transaction = revert_actions(prefix, get_revision(args.revision), index)\n progressive_fetch_extract = unlink_link_transaction.get_pfe()\n else:\n solver = Solver(prefix, context.channels, context.subdirs, specs_to_add=specs)\n unlink_link_transaction = solver.solve_for_transaction(\n force_reinstall=context.force,\n )\n progressive_fetch_extract = unlink_link_transaction.get_pfe()\n\n except ResolvePackageNotFound as e:\n channels_urls = tuple(calculate_channel_urls(\n channel_urls=index_args['channel_urls'],\n prepend=index_args['prepend'],\n platform=None,\n use_local=index_args['use_local'],\n ))\n raise PackagesNotFoundError(e.bad_deps, channels_urls)\n\n except (UnsatisfiableError, SystemExit) as e:\n # Unsatisfiable package specifications/no such revision/import error\n if e.args and 'could not import' in e.args[0]:\n raise CondaImportError(text_type(e))\n raise\n\n handle_txn(progressive_fetch_extract, unlink_link_transaction, prefix, args, newenv)\n\n\ndef handle_txn(progressive_fetch_extract, unlink_link_transaction, prefix, args, newenv,\n remove_op=False):\n if unlink_link_transaction.nothing_to_do:\n if remove_op:\n # No packages found to remove from environment\n raise 
PackagesNotFoundError(args.package_names)\n elif not newenv:\n if context.json:\n common.stdout_json_success(message='All requested packages already installed.')\n else:\n print('\\n# All requested packages already installed.\\n')\n return\n\n if not context.json:\n unlink_link_transaction.display_actions(progressive_fetch_extract)\n common.confirm_yn()\n\n elif context.dry_run:\n common.stdout_json_success(unlink_link_transaction=unlink_link_transaction, prefix=prefix,\n dry_run=True)\n raise DryRunExit()\n\n try:\n progressive_fetch_extract.execute()\n unlink_link_transaction.execute()\n\n except SystemExit as e:\n raise CondaSystemExit('Exiting', e)\n\n if newenv:\n append_env(prefix)\n touch_nonadmin(prefix)\n print_activate(args.name if args.name else prefix)\n\n if context.json:\n actions = unlink_link_transaction.make_legacy_action_groups(progressive_fetch_extract)[0]\n common.stdout_json_success(actions=actions)\n", "path": "conda/cli/install.py"}]}
3,547
485
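The conda entry above makes the post-create activation hint depend on the user's shell setup, because `conda activate` only works once the 4.4 shell integration is loaded (signalled by `CONDA_SHLVL`), while older setups still need `source activate` or, on Windows, `activate`. The sketch below restates that decision as a standalone function, with the Windows check simplified to `sys.platform`; it is illustrative rather than the conda source.

```python
import os
import sys

def activation_hint(env_name: str) -> str:
    shell = os.path.split(os.environ.get("SHELL", ""))[-1]
    if "CONDA_SHLVL" in os.environ or shell == "fish":
        return "conda activate {}".format(env_name)   # new-style shell integration
    if sys.platform == "win32":
        return "activate {}".format(env_name)         # cmd.exe batch script
    return "source activate {}".format(env_name)      # legacy bash/zsh script

print("# To activate this environment, use:")
print("#   $ " + activation_hint("anaconda2"))
```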
gh_patches_debug_5110
rasdani/github-patches
git_diff
mindsdb__mindsdb-177
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> AttributeError: 'PredictTransactionOutputData' object has no attribute 'predicted_values' **Describe the bug** After running predict.py in the example mindsdb/docs/examples/time_series/ I got the following AttributeError: ``` Traceback (most recent call last): File "predict.py", line 12, in <module> print(result.predicted_values) AttributeError: 'PredictTransactionOutputData' object has no attribute 'predicted_values' ``` **To Reproduce** Steps to reproduce the behavior: 1. First run train.py, with python3 train.py 2. When training is finished, run predict.py with python3 predict.py 3. See error **Expected behavior** I expected to see the predicted values. **Desktop (please complete the following information):** - OS: Ubuntu 18.04.2 LTS - mindsdb 1.0.5 - pip 19.1 - python 3.6.7 - virtualenv 15.1.0 - urllib3 1.24 **Additional context** Before the Traceback I got the following warning many times: ``` WARNING:mindsdb-logger-core-logger:libs/backends/ludwig.py:141 - ('Missing previous predicted values for output column: ' 'Main_Engine_Fuel_Consumption_MT_day, these should be included in your input ' 'under the name: previous_Main_Engine_Fuel_Consumption_MT_day') ``` Finally, I've installed mindsdb using pip3 inside a virtualenvironment. </issue> <code> [start of docs/examples/time_series/predict.py] 1 """ 2 3 """ 4 5 from mindsdb import Predictor 6 7 # Here we use the model to make predictions (NOTE: You need to run train.py first) 8 result = Predictor(name='fuel').predict(when_data = 'fuel_predict.csv') 9 10 # you can now print the results 11 print('The predicted main engine fuel consumption') 12 print(result.predicted_values) [end of docs/examples/time_series/predict.py] [start of docs/examples/nlp/predict.py] 1 from mindsdb import * 2 3 mdb = Predictor(name='real_estate_desc') 4 5 # Here we use the model to make predictions (NOTE: You need to run train.py first) 6 result = mdb.predict( 7 when={ 8 "description": """A true gem 9 rooms: 2 10 bathrooms: 0 11 neighboorhood: thowsand_oaks 12 amenities: parking 13 area: 84.0291068642868 14 condition: great ! 15 """ 16 } 17 ) 18 19 # you can now print the results 20 print('The predicted number of rooms') 21 print(result.predicted_values) 22 [end of docs/examples/nlp/predict.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/examples/nlp/predict.py b/docs/examples/nlp/predict.py --- a/docs/examples/nlp/predict.py +++ b/docs/examples/nlp/predict.py @@ -18,4 +18,4 @@ # you can now print the results print('The predicted number of rooms') -print(result.predicted_values) +print(result) diff --git a/docs/examples/time_series/predict.py b/docs/examples/time_series/predict.py --- a/docs/examples/time_series/predict.py +++ b/docs/examples/time_series/predict.py @@ -9,4 +9,5 @@ # you can now print the results print('The predicted main engine fuel consumption') -print(result.predicted_values) \ No newline at end of file +for row in result: + print(row)
{"golden_diff": "diff --git a/docs/examples/nlp/predict.py b/docs/examples/nlp/predict.py\n--- a/docs/examples/nlp/predict.py\n+++ b/docs/examples/nlp/predict.py\n@@ -18,4 +18,4 @@\n \n # you can now print the results\n print('The predicted number of rooms')\n-print(result.predicted_values)\n+print(result)\ndiff --git a/docs/examples/time_series/predict.py b/docs/examples/time_series/predict.py\n--- a/docs/examples/time_series/predict.py\n+++ b/docs/examples/time_series/predict.py\n@@ -9,4 +9,5 @@\n \n # you can now print the results\n print('The predicted main engine fuel consumption')\n-print(result.predicted_values)\n\\ No newline at end of file\n+for row in result:\n+ print(row)\n", "issue": "AttributeError: 'PredictTransactionOutputData' object has no attribute 'predicted_values'\n**Describe the bug**\r\nAfter running predict.py in the example mindsdb/docs/examples/time_series/ I got the following AttributeError:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"predict.py\", line 12, in <module>\r\n print(result.predicted_values)\r\nAttributeError: 'PredictTransactionOutputData' object has no attribute 'predicted_values'\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. First run train.py, with python3 train.py\r\n2. When training is finished, run predict.py with python3 predict.py\r\n3. See error\r\n\r\n**Expected behavior**\r\nI expected to see the predicted values.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Ubuntu 18.04.2 LTS\r\n- mindsdb 1.0.5\r\n- pip 19.1\r\n- python 3.6.7\r\n- virtualenv 15.1.0\r\n- urllib3 1.24\r\n\r\n**Additional context**\r\nBefore the Traceback I got the following warning many times:\r\n\r\n```\r\nWARNING:mindsdb-logger-core-logger:libs/backends/ludwig.py:141 - ('Missing previous predicted values for output column: '\r\n 'Main_Engine_Fuel_Consumption_MT_day, these should be included in your input '\r\n 'under the name: previous_Main_Engine_Fuel_Consumption_MT_day')\r\n```\r\nFinally, I've installed mindsdb using pip3 inside a virtualenvironment.\r\n\n", "before_files": [{"content": "\"\"\"\n\n\"\"\"\n\nfrom mindsdb import Predictor\n\n# Here we use the model to make predictions (NOTE: You need to run train.py first)\nresult = Predictor(name='fuel').predict(when_data = 'fuel_predict.csv')\n\n# you can now print the results\nprint('The predicted main engine fuel consumption')\nprint(result.predicted_values)", "path": "docs/examples/time_series/predict.py"}, {"content": "from mindsdb import *\n\nmdb = Predictor(name='real_estate_desc')\n\n# Here we use the model to make predictions (NOTE: You need to run train.py first)\nresult = mdb.predict(\n when={\n \"description\": \"\"\"A true gem\n rooms: 2\n bathrooms: 0\n neighboorhood: thowsand_oaks\n amenities: parking\n area: 84.0291068642868\n condition: great !\n \"\"\"\n }\n)\n\n# you can now print the results\nprint('The predicted number of rooms')\nprint(result.predicted_values)\n", "path": "docs/examples/nlp/predict.py"}]}
1,145
172
gh_patches_debug_24483
rasdani/github-patches
git_diff
sublimelsp__LSP-1335
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Triggers completion command on the wrong session When having both LSP-css and LSP-stylelint running for open file (*.css), the completions are requested from LSP-css but the `command` that the completion item triggers runs on `LSP-stylelint` triggering an error dialog. An example completion item looks like this: ``` { "command": { "command": "editor.action.triggerSuggest", "title": "Suggest" }, "documentation": { "kind": "plaintext", "value": "Specifies the width of the content area, padding area or border area (depending on 'box-sizing') of certain boxes.\n\nSyntax: <viewport-length>{1,2}\n\nMDN Reference: https://developer.mozilla.org/docs/Web/CSS/width" }, "insertTextFormat": 2, "kind": 10, "label": "width", "sortText": "d_0000", "tags": [], "textEdit": { "newText": "width: $0;", "range": { "end": { "character": 5, "line": 4 }, "start": { "character": 4, "line": 4 } } } }, ``` It has a `command` that runs after inserting completion. </issue> <code> [start of plugin/completion.py] 1 import mdpopups 2 import sublime 3 import sublime_plugin 4 import webbrowser 5 from .core.css import css 6 from .core.logging import debug 7 from .core.edit import parse_text_edit 8 from .core.protocol import Request, InsertTextFormat, Range 9 from .core.registry import LspTextCommand 10 from .core.typing import Any, List, Dict, Optional, Generator, Union 11 from .core.views import FORMAT_STRING, FORMAT_MARKUP_CONTENT, minihtml 12 from .core.views import range_to_region 13 14 15 class LspResolveDocsCommand(LspTextCommand): 16 17 completions = [] # type: List[Dict[str, Any]] 18 19 def run(self, edit: sublime.Edit, index: int, event: Optional[dict] = None) -> None: 20 item = self.completions[index] 21 detail = self.format_documentation(item.get('detail') or "") 22 documentation = self.format_documentation(item.get("documentation") or "") 23 # don't show the detail in the cooperate AC popup if it is already shown in the AC details filed. 24 self.is_detail_shown = bool(detail) 25 if not detail or not documentation: 26 # To make sure that the detail or documentation fields doesn't exist we need to resove the completion item. 27 # If those fields appear after the item is resolved we show them in the popup. 
28 session = self.best_session('completionProvider.resolveProvider') 29 if session: 30 session.send_request(Request.resolveCompletionItem(item), self.handle_resolve_response) 31 return 32 minihtml_content = self.get_content(documentation, detail) 33 self.show_popup(minihtml_content) 34 35 def format_documentation(self, content: Union[str, Dict[str, str]]) -> str: 36 return minihtml(self.view, content, allowed_formats=FORMAT_STRING | FORMAT_MARKUP_CONTENT) 37 38 def get_content(self, documentation: str, detail: str) -> str: 39 content = "" 40 if detail and not self.is_detail_shown: 41 content += "<div class='highlight'>{}</div>".format(detail) 42 if documentation: 43 content += "<div>{}</div>".format(documentation) 44 return content 45 46 def show_popup(self, minihtml_content: str) -> None: 47 viewport_width = self.view.viewport_extent()[0] 48 mdpopups.show_popup( 49 self.view, 50 minihtml_content, 51 flags=sublime.COOPERATE_WITH_AUTO_COMPLETE, 52 css=css().popups, 53 wrapper_class=css().popups_classname, 54 max_width=viewport_width, 55 on_navigate=self.on_navigate 56 ) 57 58 def on_navigate(self, url: str) -> None: 59 webbrowser.open(url) 60 61 def handle_resolve_response(self, item: Optional[dict]) -> None: 62 detail = "" 63 documentation = "" 64 if item: 65 detail = self.format_documentation(item.get('detail') or "") 66 documentation = self.format_documentation(item.get("documentation") or "") 67 if not documentation: 68 documentation = self.format_documentation({"kind": "markdown", "value": "*No documentation available.*"}) 69 minihtml_content = self.get_content(documentation, detail) 70 show = self.update_popup if self.view.is_popup_visible() else self.show_popup 71 # NOTE: Update/show popups from the main thread, or else the popup might make the AC widget disappear. 
72 sublime.set_timeout(lambda: show(minihtml_content)) 73 74 def update_popup(self, minihtml_content: str) -> None: 75 mdpopups.update_popup( 76 self.view, 77 minihtml_content, 78 css=css().popups, 79 wrapper_class=css().popups_classname, 80 ) 81 82 83 class LspCompleteCommand(sublime_plugin.TextCommand): 84 85 def epilogue(self, item: Dict[str, Any]) -> None: 86 additional_edits = item.get('additionalTextEdits') 87 if additional_edits: 88 edits = [parse_text_edit(additional_edit) for additional_edit in additional_edits] 89 self.view.run_command("lsp_apply_document_edit", {'changes': edits}) 90 command = item.get("command") 91 if command: 92 debug('Running server command "{}" for view {}'.format(command, self.view.id())) 93 self.view.run_command("lsp_execute", {"command_name": command}) 94 95 96 class LspCompleteInsertTextCommand(LspCompleteCommand): 97 98 def run(self, edit: sublime.Edit, **item: Any) -> None: 99 insert_text = item.get("insertText") or item["label"] 100 if item.get("insertTextFormat", InsertTextFormat.PlainText) == InsertTextFormat.Snippet: 101 self.view.run_command("insert_snippet", {"contents": insert_text}) 102 else: 103 self.view.run_command("insert", {"characters": insert_text}) 104 self.epilogue(item) 105 106 107 class LspCompleteTextEditCommand(LspCompleteCommand): 108 109 def run(self, edit: sublime.Edit, **item: Any) -> None: 110 text_edit = item["textEdit"] 111 new_text = text_edit['newText'] 112 edit_region = range_to_region(Range.from_lsp(text_edit['range']), self.view) 113 if item.get("insertTextFormat", InsertTextFormat.PlainText) == InsertTextFormat.Snippet: 114 for region in self.translated_regions(edit_region): 115 self.view.erase(edit, region) 116 self.view.run_command("insert_snippet", {"contents": new_text}) 117 else: 118 for region in self.translated_regions(edit_region): 119 # NOTE: Cannot do .replace, because ST will select the replacement. 120 self.view.erase(edit, region) 121 self.view.insert(edit, region.a, new_text) 122 self.epilogue(item) 123 124 def translated_regions(self, edit_region: sublime.Region) -> Generator[sublime.Region, None, None]: 125 selection = self.view.sel() 126 primary_cursor_position = selection[0].b 127 for region in reversed(selection): 128 # For each selection region, apply the same removal as for the "primary" region. 129 # To do that, translate, or offset, the LSP edit region into the non-"primary" regions. 130 # The concept of "primary" is our own, and there is no mention of it in the LSP spec. 131 translation = region.b - primary_cursor_position 132 translated_edit_region = sublime.Region(edit_region.a + translation, edit_region.b + translation) 133 yield translated_edit_region 134 135 136 def resolve(completion_list: sublime.CompletionList, items: List[sublime.CompletionItem], flags: int = 0) -> None: 137 # Resolve the promise on the main thread to prevent any sort of data race for _set_target (see sublime_plugin.py). 
138 sublime.set_timeout(lambda: completion_list.set_completions(items, flags)) 139 [end of plugin/completion.py] [start of plugin/execute_command.py] 1 import sublime 2 from .core.protocol import Request 3 from .core.registry import LspTextCommand 4 from .core.sessions import Session 5 from .core.typing import List, Optional, Dict, Any 6 from .core.views import uri_from_view, offset_to_point, region_to_range 7 8 9 class LspExecuteCommand(LspTextCommand): 10 11 capability = 'executeCommandProvider' 12 13 def run(self, 14 edit: sublime.Edit, 15 command_name: Optional[str] = None, 16 command_args: Optional[List[Any]] = None, 17 event: Optional[dict] = None) -> None: 18 session = self.best_session(self.capability) 19 if session and command_name: 20 window = self.view.window() 21 if window: 22 window.status_message("Running command {}".format(command_name)) 23 if command_args: 24 self._expand_variables(command_args) 25 self._send_command(session, command_name, command_args) 26 27 def _expand_variables(self, command_args: List[Any]) -> None: 28 region = self.view.sel()[0] 29 for i, arg in enumerate(command_args): 30 if arg in ["$file_uri", "${file_uri}"]: 31 command_args[i] = uri_from_view(self.view) 32 elif arg in ["$selection", "${selection}"]: 33 command_args[i] = self.view.substr(region) 34 elif arg in ["$offset", "${offset}"]: 35 command_args[i] = region.b 36 elif arg in ["$selection_begin", "${selection_begin}"]: 37 command_args[i] = region.begin() 38 elif arg in ["$selection_end", "${selection_end}"]: 39 command_args[i] = region.end() 40 elif arg in ["$position", "${position}"]: 41 command_args[i] = offset_to_point(self.view, region.b).to_lsp() 42 elif arg in ["$range", "${range}"]: 43 command_args[i] = region_to_range(self.view, region).to_lsp() 44 45 def _handle_response(self, command: str, response: Optional[Any]) -> None: 46 msg = "command {} completed".format(command) 47 if response: 48 msg += "with response: {}".format(response) 49 50 window = self.view.window() 51 if window: 52 window.status_message(msg) 53 54 def _handle_error(self, command: str, error: Dict[str, Any]) -> None: 55 msg = "command {} failed. Reason: {}".format(command, error.get("message", "none provided by server :(")) 56 sublime.message_dialog(msg) 57 58 def _send_command(self, session: Session, command_name: str, command_args: Optional[List[Any]]) -> None: 59 request = {"command": command_name, "arguments": command_args} if command_args else {"command": command_name} 60 session.send_request(Request.executeCommand(request), 61 lambda reponse: self._handle_response(command_name, reponse), 62 lambda error: self._handle_error(command_name, error)) 63 [end of plugin/execute_command.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plugin/completion.py b/plugin/completion.py --- a/plugin/completion.py +++ b/plugin/completion.py @@ -90,7 +90,8 @@ command = item.get("command") if command: debug('Running server command "{}" for view {}'.format(command, self.view.id())) - self.view.run_command("lsp_execute", {"command_name": command}) + args = {"command_name": command["command"], "command_args": command.get("arguments")} + self.view.run_command("lsp_execute", args) class LspCompleteInsertTextCommand(LspCompleteCommand): diff --git a/plugin/execute_command.py b/plugin/execute_command.py --- a/plugin/execute_command.py +++ b/plugin/execute_command.py @@ -15,6 +15,11 @@ command_name: Optional[str] = None, command_args: Optional[List[Any]] = None, event: Optional[dict] = None) -> None: + # Handle VSCode-specific command for triggering suggestions popup. + if command_name == "editor.action.triggerSuggest": + # Triggered from set_timeout as suggestions popup doesn't trigger otherwise. + sublime.set_timeout(lambda: self.view.run_command("auto_complete")) + return session = self.best_session(self.capability) if session and command_name: window = self.view.window()
{"golden_diff": "diff --git a/plugin/completion.py b/plugin/completion.py\n--- a/plugin/completion.py\n+++ b/plugin/completion.py\n@@ -90,7 +90,8 @@\n command = item.get(\"command\")\n if command:\n debug('Running server command \"{}\" for view {}'.format(command, self.view.id()))\n- self.view.run_command(\"lsp_execute\", {\"command_name\": command})\n+ args = {\"command_name\": command[\"command\"], \"command_args\": command.get(\"arguments\")}\n+ self.view.run_command(\"lsp_execute\", args)\n \n \n class LspCompleteInsertTextCommand(LspCompleteCommand):\ndiff --git a/plugin/execute_command.py b/plugin/execute_command.py\n--- a/plugin/execute_command.py\n+++ b/plugin/execute_command.py\n@@ -15,6 +15,11 @@\n command_name: Optional[str] = None,\n command_args: Optional[List[Any]] = None,\n event: Optional[dict] = None) -> None:\n+ # Handle VSCode-specific command for triggering suggestions popup.\n+ if command_name == \"editor.action.triggerSuggest\":\n+ # Triggered from set_timeout as suggestions popup doesn't trigger otherwise.\n+ sublime.set_timeout(lambda: self.view.run_command(\"auto_complete\"))\n+ return\n session = self.best_session(self.capability)\n if session and command_name:\n window = self.view.window()\n", "issue": "Triggers completion command on the wrong session\nWhen having both LSP-css and LSP-stylelint running for open file (*.css), the completions are requested from LSP-css but the `command` that the completion item triggers runs on `LSP-stylelint` triggering an error dialog.\r\n\r\nAn example completion item looks like this:\r\n```\r\n {\r\n \"command\": {\r\n \"command\": \"editor.action.triggerSuggest\",\r\n \"title\": \"Suggest\"\r\n },\r\n \"documentation\": {\r\n \"kind\": \"plaintext\",\r\n \"value\": \"Specifies the width of the content area, padding area or border area (depending on 'box-sizing') of certain boxes.\\n\\nSyntax: <viewport-length>{1,2}\\n\\nMDN Reference: https://developer.mozilla.org/docs/Web/CSS/width\"\r\n },\r\n \"insertTextFormat\": 2,\r\n \"kind\": 10,\r\n \"label\": \"width\",\r\n \"sortText\": \"d_0000\",\r\n \"tags\": [],\r\n \"textEdit\": {\r\n \"newText\": \"width: $0;\",\r\n \"range\": {\r\n \"end\": {\r\n \"character\": 5,\r\n \"line\": 4\r\n },\r\n \"start\": {\r\n \"character\": 4,\r\n \"line\": 4\r\n }\r\n }\r\n }\r\n },\r\n```\r\n\r\nIt has a `command` that runs after inserting completion.\n", "before_files": [{"content": "import mdpopups\nimport sublime\nimport sublime_plugin\nimport webbrowser\nfrom .core.css import css\nfrom .core.logging import debug\nfrom .core.edit import parse_text_edit\nfrom .core.protocol import Request, InsertTextFormat, Range\nfrom .core.registry import LspTextCommand\nfrom .core.typing import Any, List, Dict, Optional, Generator, Union\nfrom .core.views import FORMAT_STRING, FORMAT_MARKUP_CONTENT, minihtml\nfrom .core.views import range_to_region\n\n\nclass LspResolveDocsCommand(LspTextCommand):\n\n completions = [] # type: List[Dict[str, Any]]\n\n def run(self, edit: sublime.Edit, index: int, event: Optional[dict] = None) -> None:\n item = self.completions[index]\n detail = self.format_documentation(item.get('detail') or \"\")\n documentation = self.format_documentation(item.get(\"documentation\") or \"\")\n # don't show the detail in the cooperate AC popup if it is already shown in the AC details filed.\n self.is_detail_shown = bool(detail)\n if not detail or not documentation:\n # To make sure that the detail or documentation fields doesn't exist we need to resove the completion item.\n # If those 
fields appear after the item is resolved we show them in the popup.\n session = self.best_session('completionProvider.resolveProvider')\n if session:\n session.send_request(Request.resolveCompletionItem(item), self.handle_resolve_response)\n return\n minihtml_content = self.get_content(documentation, detail)\n self.show_popup(minihtml_content)\n\n def format_documentation(self, content: Union[str, Dict[str, str]]) -> str:\n return minihtml(self.view, content, allowed_formats=FORMAT_STRING | FORMAT_MARKUP_CONTENT)\n\n def get_content(self, documentation: str, detail: str) -> str:\n content = \"\"\n if detail and not self.is_detail_shown:\n content += \"<div class='highlight'>{}</div>\".format(detail)\n if documentation:\n content += \"<div>{}</div>\".format(documentation)\n return content\n\n def show_popup(self, minihtml_content: str) -> None:\n viewport_width = self.view.viewport_extent()[0]\n mdpopups.show_popup(\n self.view,\n minihtml_content,\n flags=sublime.COOPERATE_WITH_AUTO_COMPLETE,\n css=css().popups,\n wrapper_class=css().popups_classname,\n max_width=viewport_width,\n on_navigate=self.on_navigate\n )\n\n def on_navigate(self, url: str) -> None:\n webbrowser.open(url)\n\n def handle_resolve_response(self, item: Optional[dict]) -> None:\n detail = \"\"\n documentation = \"\"\n if item:\n detail = self.format_documentation(item.get('detail') or \"\")\n documentation = self.format_documentation(item.get(\"documentation\") or \"\")\n if not documentation:\n documentation = self.format_documentation({\"kind\": \"markdown\", \"value\": \"*No documentation available.*\"})\n minihtml_content = self.get_content(documentation, detail)\n show = self.update_popup if self.view.is_popup_visible() else self.show_popup\n # NOTE: Update/show popups from the main thread, or else the popup might make the AC widget disappear.\n sublime.set_timeout(lambda: show(minihtml_content))\n\n def update_popup(self, minihtml_content: str) -> None:\n mdpopups.update_popup(\n self.view,\n minihtml_content,\n css=css().popups,\n wrapper_class=css().popups_classname,\n )\n\n\nclass LspCompleteCommand(sublime_plugin.TextCommand):\n\n def epilogue(self, item: Dict[str, Any]) -> None:\n additional_edits = item.get('additionalTextEdits')\n if additional_edits:\n edits = [parse_text_edit(additional_edit) for additional_edit in additional_edits]\n self.view.run_command(\"lsp_apply_document_edit\", {'changes': edits})\n command = item.get(\"command\")\n if command:\n debug('Running server command \"{}\" for view {}'.format(command, self.view.id()))\n self.view.run_command(\"lsp_execute\", {\"command_name\": command})\n\n\nclass LspCompleteInsertTextCommand(LspCompleteCommand):\n\n def run(self, edit: sublime.Edit, **item: Any) -> None:\n insert_text = item.get(\"insertText\") or item[\"label\"]\n if item.get(\"insertTextFormat\", InsertTextFormat.PlainText) == InsertTextFormat.Snippet:\n self.view.run_command(\"insert_snippet\", {\"contents\": insert_text})\n else:\n self.view.run_command(\"insert\", {\"characters\": insert_text})\n self.epilogue(item)\n\n\nclass LspCompleteTextEditCommand(LspCompleteCommand):\n\n def run(self, edit: sublime.Edit, **item: Any) -> None:\n text_edit = item[\"textEdit\"]\n new_text = text_edit['newText']\n edit_region = range_to_region(Range.from_lsp(text_edit['range']), self.view)\n if item.get(\"insertTextFormat\", InsertTextFormat.PlainText) == InsertTextFormat.Snippet:\n for region in self.translated_regions(edit_region):\n self.view.erase(edit, region)\n 
self.view.run_command(\"insert_snippet\", {\"contents\": new_text})\n else:\n for region in self.translated_regions(edit_region):\n # NOTE: Cannot do .replace, because ST will select the replacement.\n self.view.erase(edit, region)\n self.view.insert(edit, region.a, new_text)\n self.epilogue(item)\n\n def translated_regions(self, edit_region: sublime.Region) -> Generator[sublime.Region, None, None]:\n selection = self.view.sel()\n primary_cursor_position = selection[0].b\n for region in reversed(selection):\n # For each selection region, apply the same removal as for the \"primary\" region.\n # To do that, translate, or offset, the LSP edit region into the non-\"primary\" regions.\n # The concept of \"primary\" is our own, and there is no mention of it in the LSP spec.\n translation = region.b - primary_cursor_position\n translated_edit_region = sublime.Region(edit_region.a + translation, edit_region.b + translation)\n yield translated_edit_region\n\n\ndef resolve(completion_list: sublime.CompletionList, items: List[sublime.CompletionItem], flags: int = 0) -> None:\n # Resolve the promise on the main thread to prevent any sort of data race for _set_target (see sublime_plugin.py).\n sublime.set_timeout(lambda: completion_list.set_completions(items, flags))\n", "path": "plugin/completion.py"}, {"content": "import sublime\nfrom .core.protocol import Request\nfrom .core.registry import LspTextCommand\nfrom .core.sessions import Session\nfrom .core.typing import List, Optional, Dict, Any\nfrom .core.views import uri_from_view, offset_to_point, region_to_range\n\n\nclass LspExecuteCommand(LspTextCommand):\n\n capability = 'executeCommandProvider'\n\n def run(self,\n edit: sublime.Edit,\n command_name: Optional[str] = None,\n command_args: Optional[List[Any]] = None,\n event: Optional[dict] = None) -> None:\n session = self.best_session(self.capability)\n if session and command_name:\n window = self.view.window()\n if window:\n window.status_message(\"Running command {}\".format(command_name))\n if command_args:\n self._expand_variables(command_args)\n self._send_command(session, command_name, command_args)\n\n def _expand_variables(self, command_args: List[Any]) -> None:\n region = self.view.sel()[0]\n for i, arg in enumerate(command_args):\n if arg in [\"$file_uri\", \"${file_uri}\"]:\n command_args[i] = uri_from_view(self.view)\n elif arg in [\"$selection\", \"${selection}\"]:\n command_args[i] = self.view.substr(region)\n elif arg in [\"$offset\", \"${offset}\"]:\n command_args[i] = region.b\n elif arg in [\"$selection_begin\", \"${selection_begin}\"]:\n command_args[i] = region.begin()\n elif arg in [\"$selection_end\", \"${selection_end}\"]:\n command_args[i] = region.end()\n elif arg in [\"$position\", \"${position}\"]:\n command_args[i] = offset_to_point(self.view, region.b).to_lsp()\n elif arg in [\"$range\", \"${range}\"]:\n command_args[i] = region_to_range(self.view, region).to_lsp()\n\n def _handle_response(self, command: str, response: Optional[Any]) -> None:\n msg = \"command {} completed\".format(command)\n if response:\n msg += \"with response: {}\".format(response)\n\n window = self.view.window()\n if window:\n window.status_message(msg)\n\n def _handle_error(self, command: str, error: Dict[str, Any]) -> None:\n msg = \"command {} failed. 
Reason: {}\".format(command, error.get(\"message\", \"none provided by server :(\"))\n sublime.message_dialog(msg)\n\n def _send_command(self, session: Session, command_name: str, command_args: Optional[List[Any]]) -> None:\n request = {\"command\": command_name, \"arguments\": command_args} if command_args else {\"command\": command_name}\n session.send_request(Request.executeCommand(request),\n lambda reponse: self._handle_response(command_name, reponse),\n lambda error: self._handle_error(command_name, error))\n", "path": "plugin/execute_command.py"}]}
3,298
298
gh_patches_debug_58021
rasdani/github-patches
git_diff
sopel-irc__sopel-949
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Problem in (at least) Wikipedia module: possibly Unicode related Hi, observe the following use case: https://en.wikipedia.org/wiki/Hir%C5%8D_Onoda @willie_5.4.1 KeyError: u'extract' (file "/usr/local/lib/python2.7/dist-packages/willie-5.4.1-py2.7.egg/willie/modules/wikipedia.py", line 89, in mw_snippet) </issue> <code> [start of sopel/modules/wikipedia.py] 1 # coding=utf-8 2 """ 3 wikipedia.py - Sopel Wikipedia Module 4 Copyright 2013 Edward Powell - embolalia.net 5 Licensed under the Eiffel Forum License 2. 6 7 http://sopel.chat 8 """ 9 from __future__ import unicode_literals, absolute_import, print_function, division 10 from sopel import web, tools 11 from sopel.config.types import StaticSection, ValidatedAttribute 12 from sopel.module import NOLIMIT, commands, example, rule 13 import json 14 import re 15 16 import sys 17 if sys.version_info.major < 3: 18 from urlparse import unquote 19 else: 20 from urllib.parse import unquote 21 22 REDIRECT = re.compile(r'^REDIRECT (.*)') 23 24 25 class WikipediaSection(StaticSection): 26 default_lang = ValidatedAttribute('default_lang', default='en') 27 """The default language to find articles from.""" 28 lang_per_channel = ValidatedAttribute('lang_per_channel') 29 30 31 def setup(bot): 32 bot.config.define_section('wikipedia', WikipediaSection) 33 34 regex = re.compile('([a-z]+).(wikipedia.org/wiki/)([^ ]+)') 35 if not bot.memory.contains('url_callbacks'): 36 bot.memory['url_callbacks'] = tools.SopelMemory() 37 bot.memory['url_callbacks'][regex] = mw_info 38 39 40 def configure(config): 41 config.define_section('wikipedia', WikipediaSection) 42 config.wikipedia.configure_setting( 43 'default_lang', 44 "Enter the default language to find articles from." 45 ) 46 47 48 def mw_search(server, query, num): 49 """ 50 Searches the specified MediaWiki server for the given query, and returns 51 the specified number of results. 52 """ 53 search_url = ('http://%s/w/api.php?format=json&action=query' 54 '&list=search&srlimit=%d&srprop=timestamp&srwhat=text' 55 '&srsearch=') % (server, num) 56 search_url += query 57 query = json.loads(web.get(search_url)) 58 if 'query' in query: 59 query = query['query']['search'] 60 return [r['title'] for r in query] 61 else: 62 return None 63 64 65 def say_snippet(bot, server, query, show_url=True): 66 page_name = query.replace('_', ' ') 67 query = query.replace(' ', '_') 68 snippet = mw_snippet(server, query) 69 msg = '[WIKIPEDIA] {} | "{}"'.format(page_name, snippet) 70 if show_url: 71 msg = msg + ' | https://{}/wiki/{}'.format(server, query) 72 bot.say(msg) 73 74 75 def mw_snippet(server, query): 76 """ 77 Retrives a snippet of the specified length from the given page on the given 78 server. 79 """ 80 snippet_url = ('https://' + server + '/w/api.php?format=json' 81 '&action=query&prop=extracts&exintro&explaintext' 82 '&exchars=300&redirects&titles=') 83 snippet_url += query 84 snippet = json.loads(web.get(snippet_url)) 85 snippet = snippet['query']['pages'] 86 87 # For some reason, the API gives the page *number* as the key, so we just 88 # grab the first page number in the results. 89 snippet = snippet[list(snippet.keys())[0]] 90 91 return snippet['extract'] 92 93 94 @rule('.*/([a-z]+\.wikipedia.org)/wiki/([^ ]+).*') 95 def mw_info(bot, trigger, found_match=None): 96 """ 97 Retrives a snippet of the specified length from the given page on the given 98 server. 
99 """ 100 match = found_match or trigger 101 say_snippet(bot, match.group(1), unquote(match.group(2)), show_url=False) 102 103 104 @commands('w', 'wiki', 'wik') 105 @example('.w San Francisco') 106 def wikipedia(bot, trigger): 107 lang = bot.config.wikipedia.default_lang 108 109 #change lang if channel has custom language set 110 if (trigger.sender and not trigger.sender.is_nick() and 111 bot.config.wikipedia.lang_per_channel): 112 customlang = re.search('(' + trigger.sender + '):(\w+)', 113 bot.config.wikipedia.lang_per_channel) 114 if customlang is not None: 115 lang = customlang.group(2) 116 117 if trigger.group(2) is None: 118 bot.reply("What do you want me to look up?") 119 return NOLIMIT 120 121 query = trigger.group(2) 122 args = re.search(r'^-([a-z]{2,12})\s(.*)', query) 123 if args is not None: 124 lang = args.group(1) 125 query = args.group(2) 126 127 if not query: 128 bot.reply('What do you want me to look up?') 129 return NOLIMIT 130 server = lang + '.wikipedia.org' 131 query = mw_search(server, query, 1) 132 if not query: 133 bot.reply("I can't find any results for that.") 134 return NOLIMIT 135 else: 136 query = query[0] 137 say_snippet(bot, server, query) 138 [end of sopel/modules/wikipedia.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sopel/modules/wikipedia.py b/sopel/modules/wikipedia.py --- a/sopel/modules/wikipedia.py +++ b/sopel/modules/wikipedia.py @@ -15,7 +15,8 @@ import sys if sys.version_info.major < 3: - from urlparse import unquote + from urlparse import unquote as _unquote + unquote = lambda s: _unquote(s.encode('utf-8')).decode('utf-8') else: from urllib.parse import unquote
{"golden_diff": "diff --git a/sopel/modules/wikipedia.py b/sopel/modules/wikipedia.py\n--- a/sopel/modules/wikipedia.py\n+++ b/sopel/modules/wikipedia.py\n@@ -15,7 +15,8 @@\n \n import sys\n if sys.version_info.major < 3:\n- from urlparse import unquote\n+ from urlparse import unquote as _unquote\n+ unquote = lambda s: _unquote(s.encode('utf-8')).decode('utf-8')\n else:\n from urllib.parse import unquote\n", "issue": "Problem in (at least) Wikipedia module: possibly Unicode related\nHi,\nobserve the following use case:\n https://en.wikipedia.org/wiki/Hir%C5%8D_Onoda\n @willie_5.4.1 KeyError: u'extract' (file \"/usr/local/lib/python2.7/dist-packages/willie-5.4.1-py2.7.egg/willie/modules/wikipedia.py\", line 89, in mw_snippet)\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nwikipedia.py - Sopel Wikipedia Module\nCopyright 2013 Edward Powell - embolalia.net\nLicensed under the Eiffel Forum License 2.\n\nhttp://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\nfrom sopel import web, tools\nfrom sopel.config.types import StaticSection, ValidatedAttribute\nfrom sopel.module import NOLIMIT, commands, example, rule\nimport json\nimport re\n\nimport sys\nif sys.version_info.major < 3:\n from urlparse import unquote\nelse:\n from urllib.parse import unquote\n\nREDIRECT = re.compile(r'^REDIRECT (.*)')\n\n\nclass WikipediaSection(StaticSection):\n default_lang = ValidatedAttribute('default_lang', default='en')\n \"\"\"The default language to find articles from.\"\"\"\n lang_per_channel = ValidatedAttribute('lang_per_channel')\n\n\ndef setup(bot):\n bot.config.define_section('wikipedia', WikipediaSection)\n\n regex = re.compile('([a-z]+).(wikipedia.org/wiki/)([^ ]+)')\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = tools.SopelMemory()\n bot.memory['url_callbacks'][regex] = mw_info\n\n\ndef configure(config):\n config.define_section('wikipedia', WikipediaSection)\n config.wikipedia.configure_setting(\n 'default_lang',\n \"Enter the default language to find articles from.\"\n )\n\n\ndef mw_search(server, query, num):\n \"\"\"\n Searches the specified MediaWiki server for the given query, and returns\n the specified number of results.\n \"\"\"\n search_url = ('http://%s/w/api.php?format=json&action=query'\n '&list=search&srlimit=%d&srprop=timestamp&srwhat=text'\n '&srsearch=') % (server, num)\n search_url += query\n query = json.loads(web.get(search_url))\n if 'query' in query:\n query = query['query']['search']\n return [r['title'] for r in query]\n else:\n return None\n\n\ndef say_snippet(bot, server, query, show_url=True):\n page_name = query.replace('_', ' ')\n query = query.replace(' ', '_')\n snippet = mw_snippet(server, query)\n msg = '[WIKIPEDIA] {} | \"{}\"'.format(page_name, snippet)\n if show_url:\n msg = msg + ' | https://{}/wiki/{}'.format(server, query)\n bot.say(msg)\n\n\ndef mw_snippet(server, query):\n \"\"\"\n Retrives a snippet of the specified length from the given page on the given\n server.\n \"\"\"\n snippet_url = ('https://' + server + '/w/api.php?format=json'\n '&action=query&prop=extracts&exintro&explaintext'\n '&exchars=300&redirects&titles=')\n snippet_url += query\n snippet = json.loads(web.get(snippet_url))\n snippet = snippet['query']['pages']\n\n # For some reason, the API gives the page *number* as the key, so we just\n # grab the first page number in the results.\n snippet = snippet[list(snippet.keys())[0]]\n\n return 
snippet['extract']\n\n\n@rule('.*/([a-z]+\\.wikipedia.org)/wiki/([^ ]+).*')\ndef mw_info(bot, trigger, found_match=None):\n \"\"\"\n Retrives a snippet of the specified length from the given page on the given\n server.\n \"\"\"\n match = found_match or trigger\n say_snippet(bot, match.group(1), unquote(match.group(2)), show_url=False)\n\n\n@commands('w', 'wiki', 'wik')\n@example('.w San Francisco')\ndef wikipedia(bot, trigger):\n lang = bot.config.wikipedia.default_lang\n\n #change lang if channel has custom language set\n if (trigger.sender and not trigger.sender.is_nick() and\n bot.config.wikipedia.lang_per_channel):\n customlang = re.search('(' + trigger.sender + '):(\\w+)',\n bot.config.wikipedia.lang_per_channel)\n if customlang is not None:\n lang = customlang.group(2)\n\n if trigger.group(2) is None:\n bot.reply(\"What do you want me to look up?\")\n return NOLIMIT\n\n query = trigger.group(2)\n args = re.search(r'^-([a-z]{2,12})\\s(.*)', query)\n if args is not None:\n lang = args.group(1)\n query = args.group(2)\n\n if not query:\n bot.reply('What do you want me to look up?')\n return NOLIMIT\n server = lang + '.wikipedia.org'\n query = mw_search(server, query, 1)\n if not query:\n bot.reply(\"I can't find any results for that.\")\n return NOLIMIT\n else:\n query = query[0]\n say_snippet(bot, server, query)\n", "path": "sopel/modules/wikipedia.py"}]}
2,028
119
gh_patches_debug_585
rasdani/github-patches
git_diff
pex-tool__pex-1679
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.73 On the docket: + [x] Unexpected distribution hash #1683 + [x] Pex fails to parse wheel tags correctly when resolving from a lock. #1676 + [x] `pex3 lock create --style universal` does not fully patch ambient interpreter properties. #1681 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.72" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.72" +__version__ = "2.1.73"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.72\"\n+__version__ = \"2.1.73\"\n", "issue": "Release 2.1.73\nOn the docket:\r\n+ [x] Unexpected distribution hash #1683 \r\n+ [x] Pex fails to parse wheel tags correctly when resolving from a lock. #1676 \r\n+ [x] `pex3 lock create --style universal` does not fully patch ambient interpreter properties. #1681 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.72\"\n", "path": "pex/version.py"}]}
661
96
gh_patches_debug_40938
rasdani/github-patches
git_diff
Cog-Creators__Red-DiscordBot-3911
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Mod cog sends owner notifications on fresh install. # Other bugs I got reminded about it when I saw a fix for #3587. Mod cog sends owner notifications about `[p]moveignoredchannels` and `[p]movedeletedelay` on fresh Red installs. Only viable solution seems to be looping through all guild settings and only send the message if `delete_delay` has been changed from the default in at least one of them though I'm basing that on my comment [here](https://github.com/Cog-Creators/Red-DiscordBot/pull/3638#discussion_r392119234). </issue> <code> [start of redbot/cogs/mod/mod.py] 1 import asyncio 2 import logging 3 import re 4 from abc import ABC 5 from collections import defaultdict 6 from typing import List, Tuple 7 8 import discord 9 from redbot.core import Config, modlog, commands 10 from redbot.core.bot import Red 11 from redbot.core.i18n import Translator, cog_i18n 12 from redbot.core.utils._internal_utils import send_to_owners_with_prefix_replaced 13 from .casetypes import CASETYPES 14 from .events import Events 15 from .kickban import KickBanMixin 16 from .mutes import MuteMixin 17 from .names import ModInfo 18 from .slowmode import Slowmode 19 from .settings import ModSettings 20 21 _ = T_ = Translator("Mod", __file__) 22 23 __version__ = "1.2.0" 24 25 26 class CompositeMetaClass(type(commands.Cog), type(ABC)): 27 """ 28 This allows the metaclass used for proper type detection to 29 coexist with discord.py's metaclass 30 """ 31 32 pass 33 34 35 @cog_i18n(_) 36 class Mod( 37 ModSettings, 38 Events, 39 KickBanMixin, 40 MuteMixin, 41 ModInfo, 42 Slowmode, 43 commands.Cog, 44 metaclass=CompositeMetaClass, 45 ): 46 """Moderation tools.""" 47 48 default_global_settings = {"version": ""} 49 50 default_guild_settings = { 51 "ban_mention_spam": False, 52 "delete_repeats": -1, 53 "ignored": False, 54 "respect_hierarchy": True, 55 "delete_delay": -1, 56 "reinvite_on_unban": False, 57 "current_tempbans": [], 58 "dm_on_kickban": False, 59 "default_days": 0, 60 } 61 62 default_channel_settings = {"ignored": False} 63 64 default_member_settings = {"past_nicks": [], "perms_cache": {}, "banned_until": False} 65 66 default_user_settings = {"past_names": []} 67 68 def __init__(self, bot: Red): 69 super().__init__() 70 self.bot = bot 71 72 self.config = Config.get_conf(self, 4961522000, force_registration=True) 73 self.config.register_global(**self.default_global_settings) 74 self.config.register_guild(**self.default_guild_settings) 75 self.config.register_channel(**self.default_channel_settings) 76 self.config.register_member(**self.default_member_settings) 77 self.config.register_user(**self.default_user_settings) 78 self.cache: dict = {} 79 self.tban_expiry_task = self.bot.loop.create_task(self.check_tempban_expirations()) 80 self.last_case: dict = defaultdict(dict) 81 82 self._ready = asyncio.Event() 83 84 async def initialize(self): 85 await self._maybe_update_config() 86 self._ready.set() 87 88 async def cog_before_invoke(self, ctx: commands.Context) -> None: 89 await self._ready.wait() 90 91 def cog_unload(self): 92 self.tban_expiry_task.cancel() 93 94 async def _maybe_update_config(self): 95 """Maybe update `delete_delay` value set by Config prior to Mod 1.0.0.""" 96 if not await self.config.version(): 97 guild_dict = await self.config.all_guilds() 98 for guild_id, info in guild_dict.items(): 99 delete_repeats = info.get("delete_repeats", False) 100 if delete_repeats: 101 val = 3 102 else: 103 
val = -1 104 await self.config.guild(discord.Object(id=guild_id)).delete_repeats.set(val) 105 await self.config.version.set("1.0.0") # set version of last update 106 if await self.config.version() < "1.1.0": 107 msg = _( 108 "Ignored guilds and channels have been moved. " 109 "Please use `[p]moveignoredchannels` if " 110 "you were previously using these functions." 111 ) 112 self.bot.loop.create_task(send_to_owners_with_prefix_replaced(self.bot, msg)) 113 await self.config.version.set("1.1.0") 114 if await self.config.version() < "1.2.0": 115 msg = _( 116 "Delete delay settings have been moved. " 117 "Please use `[p]movedeletedelay` if " 118 "you were previously using these functions." 119 ) 120 self.bot.loop.create_task(send_to_owners_with_prefix_replaced(self.bot, msg)) 121 await self.config.version.set("1.2.0") 122 123 @commands.command() 124 @commands.is_owner() 125 async def moveignoredchannels(self, ctx: commands.Context) -> None: 126 """Move ignored channels and servers to core""" 127 all_guilds = await self.config.all_guilds() 128 all_channels = await self.config.all_channels() 129 for guild_id, settings in all_guilds.items(): 130 await self.bot._config.guild_from_id(guild_id).ignored.set(settings["ignored"]) 131 await self.config.guild_from_id(guild_id).ignored.clear() 132 for channel_id, settings in all_channels.items(): 133 await self.bot._config.channel_from_id(channel_id).ignored.set(settings["ignored"]) 134 await self.config.channel_from_id(channel_id).clear() 135 await ctx.send(_("Ignored channels and guilds restored.")) 136 137 @commands.command() 138 @commands.is_owner() 139 async def movedeletedelay(self, ctx: commands.Context) -> None: 140 """ 141 Move deletedelay settings to core 142 """ 143 all_guilds = await self.config.all_guilds() 144 for guild_id, settings in all_guilds.items(): 145 await self.bot._config.guild_from_id(guild_id).delete_delay.set( 146 settings["delete_delay"] 147 ) 148 await self.config.guild_from_id(guild_id).delete_delay.clear() 149 await ctx.send(_("Delete delay settings restored.")) 150 [end of redbot/cogs/mod/mod.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/redbot/cogs/mod/mod.py b/redbot/cogs/mod/mod.py --- a/redbot/cogs/mod/mod.py +++ b/redbot/cogs/mod/mod.py @@ -6,6 +6,8 @@ from typing import List, Tuple import discord +from redbot.core.utils import AsyncIter + from redbot.core import Config, modlog, commands from redbot.core.bot import Red from redbot.core.i18n import Translator, cog_i18n @@ -95,7 +97,7 @@ """Maybe update `delete_delay` value set by Config prior to Mod 1.0.0.""" if not await self.config.version(): guild_dict = await self.config.all_guilds() - for guild_id, info in guild_dict.items(): + async for guild_id, info in AsyncIter(guild_dict.items(), steps=25): delete_repeats = info.get("delete_repeats", False) if delete_repeats: val = 3 @@ -104,20 +106,37 @@ await self.config.guild(discord.Object(id=guild_id)).delete_repeats.set(val) await self.config.version.set("1.0.0") # set version of last update if await self.config.version() < "1.1.0": - msg = _( - "Ignored guilds and channels have been moved. " - "Please use `[p]moveignoredchannels` if " - "you were previously using these functions." - ) - self.bot.loop.create_task(send_to_owners_with_prefix_replaced(self.bot, msg)) + message_sent = False + async for e in AsyncIter((await self.config.all_channels()).values(), steps=25): + if e["ignored"] is not False: + msg = _( + "Ignored guilds and channels have been moved. " + "Please use `[p]moveignoredchannels` to migrate the old settings." + ) + self.bot.loop.create_task(send_to_owners_with_prefix_replaced(self.bot, msg)) + message_sent = True + break + if message_sent is False: + async for e in AsyncIter((await self.config.all_guilds()).values(), steps=25): + if e["ignored"] is not False: + msg = _( + "Ignored guilds and channels have been moved. " + "Please use `[p]moveignoredchannels` to migrate the old settings." + ) + self.bot.loop.create_task( + send_to_owners_with_prefix_replaced(self.bot, msg) + ) + break await self.config.version.set("1.1.0") if await self.config.version() < "1.2.0": - msg = _( - "Delete delay settings have been moved. " - "Please use `[p]movedeletedelay` if " - "you were previously using these functions." - ) - self.bot.loop.create_task(send_to_owners_with_prefix_replaced(self.bot, msg)) + async for e in AsyncIter((await self.config.all_guilds()).values(), steps=25): + if e["delete_delay"] != -1: + msg = _( + "Delete delay settings have been moved. " + "Please use `[p]movedeletedelay` to migrate the old settings." + ) + self.bot.loop.create_task(send_to_owners_with_prefix_replaced(self.bot, msg)) + break await self.config.version.set("1.2.0") @commands.command()
{"golden_diff": "diff --git a/redbot/cogs/mod/mod.py b/redbot/cogs/mod/mod.py\n--- a/redbot/cogs/mod/mod.py\n+++ b/redbot/cogs/mod/mod.py\n@@ -6,6 +6,8 @@\n from typing import List, Tuple\n \n import discord\n+from redbot.core.utils import AsyncIter\n+\n from redbot.core import Config, modlog, commands\n from redbot.core.bot import Red\n from redbot.core.i18n import Translator, cog_i18n\n@@ -95,7 +97,7 @@\n \"\"\"Maybe update `delete_delay` value set by Config prior to Mod 1.0.0.\"\"\"\n if not await self.config.version():\n guild_dict = await self.config.all_guilds()\n- for guild_id, info in guild_dict.items():\n+ async for guild_id, info in AsyncIter(guild_dict.items(), steps=25):\n delete_repeats = info.get(\"delete_repeats\", False)\n if delete_repeats:\n val = 3\n@@ -104,20 +106,37 @@\n await self.config.guild(discord.Object(id=guild_id)).delete_repeats.set(val)\n await self.config.version.set(\"1.0.0\") # set version of last update\n if await self.config.version() < \"1.1.0\":\n- msg = _(\n- \"Ignored guilds and channels have been moved. \"\n- \"Please use `[p]moveignoredchannels` if \"\n- \"you were previously using these functions.\"\n- )\n- self.bot.loop.create_task(send_to_owners_with_prefix_replaced(self.bot, msg))\n+ message_sent = False\n+ async for e in AsyncIter((await self.config.all_channels()).values(), steps=25):\n+ if e[\"ignored\"] is not False:\n+ msg = _(\n+ \"Ignored guilds and channels have been moved. \"\n+ \"Please use `[p]moveignoredchannels` to migrate the old settings.\"\n+ )\n+ self.bot.loop.create_task(send_to_owners_with_prefix_replaced(self.bot, msg))\n+ message_sent = True\n+ break\n+ if message_sent is False:\n+ async for e in AsyncIter((await self.config.all_guilds()).values(), steps=25):\n+ if e[\"ignored\"] is not False:\n+ msg = _(\n+ \"Ignored guilds and channels have been moved. \"\n+ \"Please use `[p]moveignoredchannels` to migrate the old settings.\"\n+ )\n+ self.bot.loop.create_task(\n+ send_to_owners_with_prefix_replaced(self.bot, msg)\n+ )\n+ break\n await self.config.version.set(\"1.1.0\")\n if await self.config.version() < \"1.2.0\":\n- msg = _(\n- \"Delete delay settings have been moved. \"\n- \"Please use `[p]movedeletedelay` if \"\n- \"you were previously using these functions.\"\n- )\n- self.bot.loop.create_task(send_to_owners_with_prefix_replaced(self.bot, msg))\n+ async for e in AsyncIter((await self.config.all_guilds()).values(), steps=25):\n+ if e[\"delete_delay\"] != -1:\n+ msg = _(\n+ \"Delete delay settings have been moved. \"\n+ \"Please use `[p]movedeletedelay` to migrate the old settings.\"\n+ )\n+ self.bot.loop.create_task(send_to_owners_with_prefix_replaced(self.bot, msg))\n+ break\n await self.config.version.set(\"1.2.0\")\n \n @commands.command()\n", "issue": "Mod cog sends owner notifications on fresh install.\n# Other bugs\r\n\r\nI got reminded about it when I saw a fix for #3587. Mod cog sends owner notifications about `[p]moveignoredchannels` and `[p]movedeletedelay` on fresh Red installs. 
Only viable solution seems to be looping through all guild settings and only send the message if `delete_delay` has been changed from the default in at least one of them though I'm basing that on my comment [here](https://github.com/Cog-Creators/Red-DiscordBot/pull/3638#discussion_r392119234).\r\n\n", "before_files": [{"content": "import asyncio\nimport logging\nimport re\nfrom abc import ABC\nfrom collections import defaultdict\nfrom typing import List, Tuple\n\nimport discord\nfrom redbot.core import Config, modlog, commands\nfrom redbot.core.bot import Red\nfrom redbot.core.i18n import Translator, cog_i18n\nfrom redbot.core.utils._internal_utils import send_to_owners_with_prefix_replaced\nfrom .casetypes import CASETYPES\nfrom .events import Events\nfrom .kickban import KickBanMixin\nfrom .mutes import MuteMixin\nfrom .names import ModInfo\nfrom .slowmode import Slowmode\nfrom .settings import ModSettings\n\n_ = T_ = Translator(\"Mod\", __file__)\n\n__version__ = \"1.2.0\"\n\n\nclass CompositeMetaClass(type(commands.Cog), type(ABC)):\n \"\"\"\n This allows the metaclass used for proper type detection to\n coexist with discord.py's metaclass\n \"\"\"\n\n pass\n\n\n@cog_i18n(_)\nclass Mod(\n ModSettings,\n Events,\n KickBanMixin,\n MuteMixin,\n ModInfo,\n Slowmode,\n commands.Cog,\n metaclass=CompositeMetaClass,\n):\n \"\"\"Moderation tools.\"\"\"\n\n default_global_settings = {\"version\": \"\"}\n\n default_guild_settings = {\n \"ban_mention_spam\": False,\n \"delete_repeats\": -1,\n \"ignored\": False,\n \"respect_hierarchy\": True,\n \"delete_delay\": -1,\n \"reinvite_on_unban\": False,\n \"current_tempbans\": [],\n \"dm_on_kickban\": False,\n \"default_days\": 0,\n }\n\n default_channel_settings = {\"ignored\": False}\n\n default_member_settings = {\"past_nicks\": [], \"perms_cache\": {}, \"banned_until\": False}\n\n default_user_settings = {\"past_names\": []}\n\n def __init__(self, bot: Red):\n super().__init__()\n self.bot = bot\n\n self.config = Config.get_conf(self, 4961522000, force_registration=True)\n self.config.register_global(**self.default_global_settings)\n self.config.register_guild(**self.default_guild_settings)\n self.config.register_channel(**self.default_channel_settings)\n self.config.register_member(**self.default_member_settings)\n self.config.register_user(**self.default_user_settings)\n self.cache: dict = {}\n self.tban_expiry_task = self.bot.loop.create_task(self.check_tempban_expirations())\n self.last_case: dict = defaultdict(dict)\n\n self._ready = asyncio.Event()\n\n async def initialize(self):\n await self._maybe_update_config()\n self._ready.set()\n\n async def cog_before_invoke(self, ctx: commands.Context) -> None:\n await self._ready.wait()\n\n def cog_unload(self):\n self.tban_expiry_task.cancel()\n\n async def _maybe_update_config(self):\n \"\"\"Maybe update `delete_delay` value set by Config prior to Mod 1.0.0.\"\"\"\n if not await self.config.version():\n guild_dict = await self.config.all_guilds()\n for guild_id, info in guild_dict.items():\n delete_repeats = info.get(\"delete_repeats\", False)\n if delete_repeats:\n val = 3\n else:\n val = -1\n await self.config.guild(discord.Object(id=guild_id)).delete_repeats.set(val)\n await self.config.version.set(\"1.0.0\") # set version of last update\n if await self.config.version() < \"1.1.0\":\n msg = _(\n \"Ignored guilds and channels have been moved. 
\"\n \"Please use `[p]moveignoredchannels` if \"\n \"you were previously using these functions.\"\n )\n self.bot.loop.create_task(send_to_owners_with_prefix_replaced(self.bot, msg))\n await self.config.version.set(\"1.1.0\")\n if await self.config.version() < \"1.2.0\":\n msg = _(\n \"Delete delay settings have been moved. \"\n \"Please use `[p]movedeletedelay` if \"\n \"you were previously using these functions.\"\n )\n self.bot.loop.create_task(send_to_owners_with_prefix_replaced(self.bot, msg))\n await self.config.version.set(\"1.2.0\")\n\n @commands.command()\n @commands.is_owner()\n async def moveignoredchannels(self, ctx: commands.Context) -> None:\n \"\"\"Move ignored channels and servers to core\"\"\"\n all_guilds = await self.config.all_guilds()\n all_channels = await self.config.all_channels()\n for guild_id, settings in all_guilds.items():\n await self.bot._config.guild_from_id(guild_id).ignored.set(settings[\"ignored\"])\n await self.config.guild_from_id(guild_id).ignored.clear()\n for channel_id, settings in all_channels.items():\n await self.bot._config.channel_from_id(channel_id).ignored.set(settings[\"ignored\"])\n await self.config.channel_from_id(channel_id).clear()\n await ctx.send(_(\"Ignored channels and guilds restored.\"))\n\n @commands.command()\n @commands.is_owner()\n async def movedeletedelay(self, ctx: commands.Context) -> None:\n \"\"\"\n Move deletedelay settings to core\n \"\"\"\n all_guilds = await self.config.all_guilds()\n for guild_id, settings in all_guilds.items():\n await self.bot._config.guild_from_id(guild_id).delete_delay.set(\n settings[\"delete_delay\"]\n )\n await self.config.guild_from_id(guild_id).delete_delay.clear()\n await ctx.send(_(\"Delete delay settings restored.\"))\n", "path": "redbot/cogs/mod/mod.py"}]}
2,245
800
gh_patches_debug_577
rasdani/github-patches
git_diff
numba__numba-1356
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use CPython allocator in NRT NRT should optionally use the CPython memory allocation functions (when imported from CPython). This would allow Numba-allocated memory to be seen by other utilities such as `sys.getallocatedblocks()`, `sys.debugmallocstats()`, and `tracemalloc`. </issue> <code> [start of numba/runtime/nrt.py] 1 from __future__ import print_function, absolute_import, division 2 3 from collections import namedtuple 4 5 from . import atomicops 6 from llvmlite import binding as ll 7 8 from numba.utils import finalize as _finalize 9 from . import _nrt_python as _nrt 10 11 _nrt_mstats = namedtuple("nrt_mstats", ["alloc", "free", "mi_alloc", "mi_free"]) 12 13 14 class _Runtime(object): 15 def __init__(self): 16 self._init = False 17 18 def initialize(self, ctx): 19 """Initializes the NRT 20 21 Must be called before any actual call to the NRT API. 22 Safe to be called multiple times. 23 """ 24 if self._init: 25 # Already initialized 26 return 27 28 # Register globals into the system 29 for py_name in _nrt.c_helpers: 30 c_name = "NRT_" + py_name 31 c_address = _nrt.c_helpers[py_name] 32 ll.add_symbol(c_name, c_address) 33 34 # Compile atomic operations 35 self._library = atomicops.compile_nrt_functions(ctx) 36 37 self._ptr_inc = self._library.get_pointer_to_function("nrt_atomic_add") 38 self._ptr_dec = self._library.get_pointer_to_function("nrt_atomic_sub") 39 self._ptr_cas = self._library.get_pointer_to_function("nrt_atomic_cas") 40 41 # Install atomic ops to NRT 42 _nrt.memsys_set_atomic_inc_dec(self._ptr_inc, self._ptr_dec) 43 _nrt.memsys_set_atomic_cas(self._ptr_cas) 44 45 self._init = True 46 47 @staticmethod 48 def shutdown(): 49 """ 50 Shutdown the NRT 51 Safe to be called without calling Runtime.initialize first 52 """ 53 _nrt.memsys_shutdown() 54 55 @property 56 def library(self): 57 """ 58 Return the Library object containing the various NRT functions. 59 """ 60 return self._library 61 62 def meminfo_new(self, data, pyobj): 63 """ 64 Returns a MemInfo object that tracks memory at `data` owned by `pyobj`. 65 MemInfo will acquire a reference on `pyobj`. 66 The release of MemInfo will release a reference on `pyobj`. 67 """ 68 mi = _nrt.meminfo_new(data, pyobj) 69 return MemInfo(mi) 70 71 def meminfo_alloc(self, size, safe=False): 72 """ 73 Allocate a new memory of `size` bytes and returns a MemInfo object 74 that tracks the allocation. When there is no more reference to the 75 MemInfo object, the underlying memory will be deallocated. 76 77 If `safe` flag is True, the memory is allocated using the `safe` scheme. 78 This is used for debugging and testing purposes. 79 See `NRT_MemInfo_alloc_safe()` in "nrt.h" for details. 80 """ 81 if safe: 82 mi = _nrt.meminfo_alloc_safe(size) 83 else: 84 mi = _nrt.meminfo_alloc(size) 85 return MemInfo(mi) 86 87 def get_allocation_stats(self): 88 """ 89 Returns a namedtuple of (alloc, free, mi_alloc, mi_free) for count of 90 each memory operations. 
91 """ 92 return _nrt_mstats(alloc=_nrt.memsys_get_stats_alloc(), 93 free=_nrt.memsys_get_stats_free(), 94 mi_alloc=_nrt.memsys_get_stats_mi_alloc(), 95 mi_free=_nrt.memsys_get_stats_mi_free()) 96 97 98 # Alias to _nrt_python._MemInfo 99 MemInfo = _nrt._MemInfo 100 101 # Create uninitialized runtime 102 rtsys = _Runtime() 103 104 # Install finalizer 105 _finalize(rtsys, _Runtime.shutdown) 106 107 # Avoid future use of the class 108 del _Runtime 109 [end of numba/runtime/nrt.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/numba/runtime/nrt.py b/numba/runtime/nrt.py --- a/numba/runtime/nrt.py +++ b/numba/runtime/nrt.py @@ -98,7 +98,8 @@ # Alias to _nrt_python._MemInfo MemInfo = _nrt._MemInfo -# Create uninitialized runtime +# Create runtime +_nrt.memsys_use_cpython_allocator() rtsys = _Runtime() # Install finalizer
{"golden_diff": "diff --git a/numba/runtime/nrt.py b/numba/runtime/nrt.py\n--- a/numba/runtime/nrt.py\n+++ b/numba/runtime/nrt.py\n@@ -98,7 +98,8 @@\n # Alias to _nrt_python._MemInfo\n MemInfo = _nrt._MemInfo\n \n-# Create uninitialized runtime\n+# Create runtime\n+_nrt.memsys_use_cpython_allocator()\n rtsys = _Runtime()\n \n # Install finalizer\n", "issue": "Use CPython allocator in NRT\nNRT should optionally use the CPython memory allocation functions (when imported from CPython). This would allow Numba-allocated memory to be seen by other utilities such as `sys.getallocatedblocks()`, `sys.debugmallocstats()`, and `tracemalloc`.\n\n", "before_files": [{"content": "from __future__ import print_function, absolute_import, division\n\nfrom collections import namedtuple\n\nfrom . import atomicops\nfrom llvmlite import binding as ll\n\nfrom numba.utils import finalize as _finalize\nfrom . import _nrt_python as _nrt\n\n_nrt_mstats = namedtuple(\"nrt_mstats\", [\"alloc\", \"free\", \"mi_alloc\", \"mi_free\"])\n\n\nclass _Runtime(object):\n def __init__(self):\n self._init = False\n\n def initialize(self, ctx):\n \"\"\"Initializes the NRT\n\n Must be called before any actual call to the NRT API.\n Safe to be called multiple times.\n \"\"\"\n if self._init:\n # Already initialized\n return\n\n # Register globals into the system\n for py_name in _nrt.c_helpers:\n c_name = \"NRT_\" + py_name\n c_address = _nrt.c_helpers[py_name]\n ll.add_symbol(c_name, c_address)\n\n # Compile atomic operations\n self._library = atomicops.compile_nrt_functions(ctx)\n\n self._ptr_inc = self._library.get_pointer_to_function(\"nrt_atomic_add\")\n self._ptr_dec = self._library.get_pointer_to_function(\"nrt_atomic_sub\")\n self._ptr_cas = self._library.get_pointer_to_function(\"nrt_atomic_cas\")\n\n # Install atomic ops to NRT\n _nrt.memsys_set_atomic_inc_dec(self._ptr_inc, self._ptr_dec)\n _nrt.memsys_set_atomic_cas(self._ptr_cas)\n\n self._init = True\n\n @staticmethod\n def shutdown():\n \"\"\"\n Shutdown the NRT\n Safe to be called without calling Runtime.initialize first\n \"\"\"\n _nrt.memsys_shutdown()\n\n @property\n def library(self):\n \"\"\"\n Return the Library object containing the various NRT functions.\n \"\"\"\n return self._library\n\n def meminfo_new(self, data, pyobj):\n \"\"\"\n Returns a MemInfo object that tracks memory at `data` owned by `pyobj`.\n MemInfo will acquire a reference on `pyobj`.\n The release of MemInfo will release a reference on `pyobj`.\n \"\"\"\n mi = _nrt.meminfo_new(data, pyobj)\n return MemInfo(mi)\n\n def meminfo_alloc(self, size, safe=False):\n \"\"\"\n Allocate a new memory of `size` bytes and returns a MemInfo object\n that tracks the allocation. 
When there is no more reference to the\n MemInfo object, the underlying memory will be deallocated.\n\n If `safe` flag is True, the memory is allocated using the `safe` scheme.\n This is used for debugging and testing purposes.\n See `NRT_MemInfo_alloc_safe()` in \"nrt.h\" for details.\n \"\"\"\n if safe:\n mi = _nrt.meminfo_alloc_safe(size)\n else:\n mi = _nrt.meminfo_alloc(size)\n return MemInfo(mi)\n\n def get_allocation_stats(self):\n \"\"\"\n Returns a namedtuple of (alloc, free, mi_alloc, mi_free) for count of\n each memory operations.\n \"\"\"\n return _nrt_mstats(alloc=_nrt.memsys_get_stats_alloc(),\n free=_nrt.memsys_get_stats_free(),\n mi_alloc=_nrt.memsys_get_stats_mi_alloc(),\n mi_free=_nrt.memsys_get_stats_mi_free())\n\n\n# Alias to _nrt_python._MemInfo\nMemInfo = _nrt._MemInfo\n\n# Create uninitialized runtime\nrtsys = _Runtime()\n\n# Install finalizer\n_finalize(rtsys, _Runtime.shutdown)\n\n# Avoid future use of the class\ndel _Runtime\n", "path": "numba/runtime/nrt.py"}]}
1,632
106
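
The numba record routes NRT through CPython's allocator so that Python-side tools can observe Numba's allocations. The sketch below is a hypothetical visibility check: it assumes a build with the patch applied, and imports `rtsys` from the `numba.runtime.nrt` path shown in the record (recent Numba exposes it as `numba.core.runtime.rtsys`). It is an illustration, not part of the patch.

```python
# Hypothetical visibility check, assuming an NRT built on the CPython raw
# allocator (the change in the patch above). Module paths follow the nrt.py
# file shown in the record.
import tracemalloc

import numpy as np
from numba import njit
from numba.runtime.nrt import rtsys


@njit
def make_buffer(n):
    # np.ones inside nopython mode allocates a MemInfo-backed array via NRT
    return np.ones(n)


make_buffer(10)                      # warm-up call so compilation is not traced

tracemalloc.start()
before = rtsys.get_allocation_stats()

buf = make_buffer(1000000)           # the allocation we want both sides to see

after = rtsys.get_allocation_stats()
current, peak = tracemalloc.get_traced_memory()
tracemalloc.stop()

print("NRT allocations seen by rtsys :", after.alloc - before.alloc)
print("bytes seen by tracemalloc     :", current, "(peak", peak, ")")
```
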
gh_patches_debug_36848
rasdani/github-patches
git_diff
pwndbg__pwndbg-1920
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The `ctx threads` (or `threads`) should display all threads no matter of context threads limit cc: @CptGibbon we should probably add this option for convenience :) </issue> <code> [start of pwndbg/commands/tls.py] 1 """ 2 Command to print the information of the current Thread Local Storage (TLS). 3 """ 4 from __future__ import annotations 5 6 import argparse 7 8 import pwndbg.commands 9 import pwndbg.gdblib.tls 10 from pwndbg.color import message 11 from pwndbg.commands import CommandCategory 12 13 parser = argparse.ArgumentParser( 14 formatter_class=argparse.RawTextHelpFormatter, 15 description="Print out base address of the current Thread Local Storage (TLS).", 16 ) 17 18 parser.add_argument( 19 "-p", 20 "--pthread-self", 21 action="store_true", 22 default=False, 23 help="Try to get the address of TLS by calling pthread_self().", 24 ) 25 26 27 @pwndbg.commands.ArgparsedCommand(parser, category=CommandCategory.LINUX) 28 @pwndbg.commands.OnlyWhenRunning 29 @pwndbg.commands.OnlyWhenUserspace 30 def tls(pthread_self=False) -> None: 31 tls_base = ( 32 pwndbg.gdblib.tls.find_address_with_register() 33 if not pthread_self 34 else pwndbg.gdblib.tls.find_address_with_pthread_self() 35 ) 36 if pwndbg.gdblib.memory.is_readable_address(tls_base): 37 print(message.success("Thread Local Storage (TLS) base: %#x" % tls_base)) 38 print(message.success("TLS is located at:")) 39 print(message.notice(pwndbg.gdblib.vmmap.find(tls_base))) 40 return 41 print(message.error("Couldn't find Thread Local Storage (TLS) base.")) 42 if not pthread_self: 43 print( 44 message.notice( 45 "You can try to use -p/--pthread option to get the address of TLS by calling pthread_self().\n" 46 "(This might cause problems if the pthread_self() is not in libc or not initialized yet.)" 47 ) 48 ) 49 [end of pwndbg/commands/tls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pwndbg/commands/tls.py b/pwndbg/commands/tls.py --- a/pwndbg/commands/tls.py +++ b/pwndbg/commands/tls.py @@ -5,6 +5,10 @@ import argparse +import gdb +from tabulate import tabulate + +import pwndbg.color.memory as M import pwndbg.commands import pwndbg.gdblib.tls from pwndbg.color import message @@ -46,3 +50,97 @@ "(This might cause problems if the pthread_self() is not in libc or not initialized yet.)" ) ) + + +parser = argparse.ArgumentParser( + formatter_class=argparse.RawTextHelpFormatter, + description="List all threads belonging to the selected inferior.", +) +group = parser.add_mutually_exclusive_group() + +group.add_argument( + "num_threads", + type=int, + nargs="?", + default=None, + help="Number of threads to display. Omit to display all threads.", +) + +group.add_argument( + "-c", + "--config", + action="store_true", + dest="respect_config", + help="Respect context-max-threads config to limit number of threads displayed.", +) + + +@pwndbg.commands.ArgparsedCommand(parser, category=CommandCategory.LINUX) +@pwndbg.commands.OnlyWhenRunning +@pwndbg.commands.OnlyWhenUserspace +def threads(num_threads, respect_config) -> None: + table = [] + headers = ["global_num", "name", "status", "pc", "symbol"] + bold_green = lambda text: pwndbg.color.bold(pwndbg.color.green(text)) + + try: + original_thread = gdb.selected_thread() + except SystemError: + original_thread = None + + all_threads = gdb.selected_inferior().threads()[::-1] + + displayed_threads = [] + + if original_thread is not None and original_thread.is_valid(): + displayed_threads.append(original_thread) + + for thread in all_threads: + if respect_config and len(displayed_threads) >= int( + pwndbg.commands.context.config_max_threads_display + ): + break + elif num_threads is not None and len(displayed_threads) >= num_threads: + break + + if thread.is_valid() and thread is not original_thread: + displayed_threads.append(thread) + + for thread in displayed_threads: + name = thread.name or "" + + if thread is original_thread: + row = [ + bold_green(thread.global_num), + bold_green(name), + ] + else: + row = [ + thread.global_num, + name, + ] + + row.append(pwndbg.commands.context.get_thread_status(thread)) + + if thread.is_stopped(): + thread.switch() + pc = gdb.selected_frame().pc() + + pc_colored = M.get(pc) + symbol = pwndbg.gdblib.symbol.get(pc) + + row.append(pc_colored) + + if symbol: + if thread is original_thread: + row.append(bold_green(symbol)) + else: + row.append(symbol) + + table.append(row) + + if original_thread is not None and original_thread.is_valid(): + original_thread.switch() + + print(tabulate(table, headers)) + print(f"\nShowing {len(displayed_threads)} of {len(all_threads)} threads.")
{"golden_diff": "diff --git a/pwndbg/commands/tls.py b/pwndbg/commands/tls.py\n--- a/pwndbg/commands/tls.py\n+++ b/pwndbg/commands/tls.py\n@@ -5,6 +5,10 @@\n \n import argparse\n \n+import gdb\n+from tabulate import tabulate\n+\n+import pwndbg.color.memory as M\n import pwndbg.commands\n import pwndbg.gdblib.tls\n from pwndbg.color import message\n@@ -46,3 +50,97 @@\n \"(This might cause problems if the pthread_self() is not in libc or not initialized yet.)\"\n )\n )\n+\n+\n+parser = argparse.ArgumentParser(\n+ formatter_class=argparse.RawTextHelpFormatter,\n+ description=\"List all threads belonging to the selected inferior.\",\n+)\n+group = parser.add_mutually_exclusive_group()\n+\n+group.add_argument(\n+ \"num_threads\",\n+ type=int,\n+ nargs=\"?\",\n+ default=None,\n+ help=\"Number of threads to display. Omit to display all threads.\",\n+)\n+\n+group.add_argument(\n+ \"-c\",\n+ \"--config\",\n+ action=\"store_true\",\n+ dest=\"respect_config\",\n+ help=\"Respect context-max-threads config to limit number of threads displayed.\",\n+)\n+\n+\n+@pwndbg.commands.ArgparsedCommand(parser, category=CommandCategory.LINUX)\n+@pwndbg.commands.OnlyWhenRunning\n+@pwndbg.commands.OnlyWhenUserspace\n+def threads(num_threads, respect_config) -> None:\n+ table = []\n+ headers = [\"global_num\", \"name\", \"status\", \"pc\", \"symbol\"]\n+ bold_green = lambda text: pwndbg.color.bold(pwndbg.color.green(text))\n+\n+ try:\n+ original_thread = gdb.selected_thread()\n+ except SystemError:\n+ original_thread = None\n+\n+ all_threads = gdb.selected_inferior().threads()[::-1]\n+\n+ displayed_threads = []\n+\n+ if original_thread is not None and original_thread.is_valid():\n+ displayed_threads.append(original_thread)\n+\n+ for thread in all_threads:\n+ if respect_config and len(displayed_threads) >= int(\n+ pwndbg.commands.context.config_max_threads_display\n+ ):\n+ break\n+ elif num_threads is not None and len(displayed_threads) >= num_threads:\n+ break\n+\n+ if thread.is_valid() and thread is not original_thread:\n+ displayed_threads.append(thread)\n+\n+ for thread in displayed_threads:\n+ name = thread.name or \"\"\n+\n+ if thread is original_thread:\n+ row = [\n+ bold_green(thread.global_num),\n+ bold_green(name),\n+ ]\n+ else:\n+ row = [\n+ thread.global_num,\n+ name,\n+ ]\n+\n+ row.append(pwndbg.commands.context.get_thread_status(thread))\n+\n+ if thread.is_stopped():\n+ thread.switch()\n+ pc = gdb.selected_frame().pc()\n+\n+ pc_colored = M.get(pc)\n+ symbol = pwndbg.gdblib.symbol.get(pc)\n+\n+ row.append(pc_colored)\n+\n+ if symbol:\n+ if thread is original_thread:\n+ row.append(bold_green(symbol))\n+ else:\n+ row.append(symbol)\n+\n+ table.append(row)\n+\n+ if original_thread is not None and original_thread.is_valid():\n+ original_thread.switch()\n+\n+ print(tabulate(table, headers))\n+ print(f\"\\nShowing {len(displayed_threads)} of {len(all_threads)} threads.\")\n", "issue": "The `ctx threads` (or `threads`) should display all threads no matter of context threads limit\ncc: @CptGibbon we should probably add this option for convenience :)\n", "before_files": [{"content": "\"\"\"\nCommand to print the information of the current Thread Local Storage (TLS).\n\"\"\"\nfrom __future__ import annotations\n\nimport argparse\n\nimport pwndbg.commands\nimport pwndbg.gdblib.tls\nfrom pwndbg.color import message\nfrom pwndbg.commands import CommandCategory\n\nparser = argparse.ArgumentParser(\n formatter_class=argparse.RawTextHelpFormatter,\n description=\"Print out base address of the current Thread Local Storage 
(TLS).\",\n)\n\nparser.add_argument(\n \"-p\",\n \"--pthread-self\",\n action=\"store_true\",\n default=False,\n help=\"Try to get the address of TLS by calling pthread_self().\",\n)\n\n\n@pwndbg.commands.ArgparsedCommand(parser, category=CommandCategory.LINUX)\n@pwndbg.commands.OnlyWhenRunning\n@pwndbg.commands.OnlyWhenUserspace\ndef tls(pthread_self=False) -> None:\n tls_base = (\n pwndbg.gdblib.tls.find_address_with_register()\n if not pthread_self\n else pwndbg.gdblib.tls.find_address_with_pthread_self()\n )\n if pwndbg.gdblib.memory.is_readable_address(tls_base):\n print(message.success(\"Thread Local Storage (TLS) base: %#x\" % tls_base))\n print(message.success(\"TLS is located at:\"))\n print(message.notice(pwndbg.gdblib.vmmap.find(tls_base)))\n return\n print(message.error(\"Couldn't find Thread Local Storage (TLS) base.\"))\n if not pthread_self:\n print(\n message.notice(\n \"You can try to use -p/--pthread option to get the address of TLS by calling pthread_self().\\n\"\n \"(This might cause problems if the pthread_self() is not in libc or not initialized yet.)\"\n )\n )\n", "path": "pwndbg/commands/tls.py"}]}
1,045
781
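
The pwndbg record adds a `threads` command that lists every thread regardless of the context display limit. As a minimal sketch of the same idea using only stock GDB Python: the command name `list-all-threads` is invented, while the gdb calls are the ones the patch itself relies on. This is not pwndbg's implementation.

```python
# A bare-bones GDB command that lists *every* thread of the selected
# inferior, with no display cap. Illustration only.
import gdb


class ListAllThreads(gdb.Command):
    """List all threads of the selected inferior."""

    def __init__(self):
        super().__init__("list-all-threads", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        original = gdb.selected_thread()
        for thread in gdb.selected_inferior().threads():
            if not thread.is_valid():
                continue
            pc = "?"
            if thread.is_stopped():
                thread.switch()                        # pc is read off the selected thread
                pc = hex(int(gdb.selected_frame().pc()))
            print("%3d  %-20s %s" % (thread.global_num, thread.name or "", pc))
        if original is not None and original.is_valid():
            original.switch()                          # restore the user's selection


ListAllThreads()   # instantiating registers the command with GDB
```
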
gh_patches_debug_12510
rasdani/github-patches
git_diff
web2py__web2py-2115
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Python 3 compatibility fix https://github.com/web2py/web2py/blob/master/gluon/contrib/webclient.py#L124 should be `data['_formname'] = list(self.forms.keys())[0]` to avoid `TypeError: 'dict_keys' object does not support indexing` when this is executing under Python 3. </issue> <code> [start of gluon/contrib/webclient.py] 1 """ 2 Developed by Massimo Di Pierro 3 Released under the web2py license (LGPL) 4 5 It an interface on top of urllib2 which simplifies scripting of http requests 6 mostly for testing purposes 7 8 - customizable 9 - supports basic auth 10 - supports cookies 11 - supports session cookies (tested with web2py sessions) 12 - detects broken session 13 - detects web2py form postbacks and handles formname and formkey 14 - detects web2py tickets 15 16 Some examples at the bottom. 17 """ 18 from __future__ import print_function 19 from gluon._compat import urllib2, cookielib, iteritems, to_native, urlencode, to_bytes 20 import re 21 import time 22 23 24 DEFAULT_HEADERS = { 25 'user-agent': 'Mozilla/4.0', # some servers are picky 26 'accept-language': 'en', 27 } 28 29 FORM_REGEX = re.compile('(\<input name\="_formkey" type\="hidden" value\="(?P<formkey>.+?)" \/\>)?\<input name\="_formname" type\="hidden" value\="(?P<formname>.+?)" \/\>') 30 31 SESSION_REGEX = 'session_id_(?P<name>.+)' 32 33 34 class WebClient(object): 35 36 def __init__(self, 37 app='', 38 postbacks=True, 39 default_headers=DEFAULT_HEADERS, 40 session_regex=SESSION_REGEX): 41 self.app = app 42 self.postbacks = postbacks 43 self.forms = {} 44 self.history = [] 45 self.cookies = {} 46 self.cookiejar = cookielib.CookieJar() 47 self.default_headers = default_headers 48 self.sessions = {} 49 self.session_regex = session_regex and re.compile(session_regex) 50 self.headers = {} 51 52 def _parse_headers_in_cookies(self): 53 self.cookies = {} 54 if 'set-cookie' in self.headers: 55 for item in self.headers['set-cookie'].split(','): 56 cookie = item[:item.find(';')] 57 pos = cookie.find('=') 58 key = cookie[:pos] 59 value = cookie[pos+1:] 60 self.cookies[key.strip()] = value.strip() 61 62 def get(self, url, cookies=None, headers=None, auth=None): 63 return self.post(url, data=None, cookies=cookies, 64 headers=headers, method='GET') 65 66 def post(self, url, data=None, cookies=None, 67 headers=None, auth=None, method='auto'): 68 self.url = self.app + url 69 70 # if this POST form requires a postback do it 71 if data and '_formname' in data and self.postbacks and \ 72 self.history and self.history[-1][1] != self.url: 73 # to bypass the web2py CSRF need to get formkey 74 # before submitting the form 75 self.get(url, cookies=cookies, headers=headers, auth=auth) 76 77 # unless cookies are specified, recycle cookies 78 if cookies is None: 79 cookies = self.cookies 80 cookies = cookies or {} 81 headers = headers or {} 82 83 args = [ 84 urllib2.HTTPCookieProcessor(self.cookiejar), 85 urllib2.HTTPHandler(debuglevel=0) 86 ] 87 # if required do basic auth 88 if auth: 89 auth_handler = urllib2.HTTPBasicAuthHandler() 90 auth_handler.add_password(**auth) 91 args.append(auth_handler) 92 93 opener = urllib2.build_opener(*args) 94 95 # copy headers from dict to list of key,value 96 headers_list = [] 97 for key, value in iteritems(self.default_headers): 98 if not key in headers: 99 headers[key] = value 100 for key, value in iteritems(headers): 101 if isinstance(value, (list, tuple)): 102 for v in value: 103 headers_list.append((key, 
v)) 104 else: 105 headers_list.append((key, value)) 106 107 # move cookies to headers 108 for key, value in iteritems(cookies): 109 headers_list.append(('Cookie', '%s=%s' % (key, value))) 110 111 # add headers to request 112 for key, value in headers_list: 113 opener.addheaders.append((key, str(value))) 114 115 # assume everything is ok and make http request 116 error = None 117 try: 118 if isinstance(data, str): 119 self.method = 'POST' if method=='auto' else method 120 elif isinstance(data, dict): 121 self.method = 'POST' if method=='auto' else method 122 # if there is only one form, set _formname automatically 123 if not '_formname' in data and len(self.forms) == 1: 124 data['_formname'] = self.forms.keys()[0] 125 126 # if there is no formkey but it is known, set it 127 if '_formname' in data and not '_formkey' in data and \ 128 data['_formname'] in self.forms: 129 data['_formkey'] = self.forms[data['_formname']] 130 131 # time the POST request 132 data = urlencode(data, doseq=True) 133 else: 134 self.method = 'GET' if method=='auto' else method 135 data = None 136 t0 = time.time() 137 self.response = opener.open(self.url, to_bytes(data)) 138 self.time = time.time() - t0 139 except urllib2.HTTPError as er: 140 error = er 141 # catch HTTP errors 142 self.time = time.time() - t0 143 self.response = er 144 145 if hasattr(self.response, 'getcode'): 146 self.status = self.response.getcode() 147 else:#python2.5 148 self.status = None 149 150 self.text = to_native(self.response.read()) 151 # In PY3 self.response.headers are case sensitive 152 self.headers = dict() 153 for h in self.response.headers: 154 self.headers[h.lower()] = self.response.headers[h] 155 156 # treat web2py tickets as special types of errors 157 if error is not None: 158 if 'web2py_error' in self.headers: 159 raise RuntimeError(self.headers['web2py_error']) 160 else: 161 raise error 162 163 self._parse_headers_in_cookies() 164 165 # check is a new session id has been issued, symptom of broken session 166 if self.session_regex is not None: 167 for cookie, value in iteritems(self.cookies): 168 match = self.session_regex.match(cookie) 169 if match: 170 name = match.group('name') 171 if name in self.sessions and self.sessions[name] != value: 172 print(RuntimeError('Changed session ID %s' % name)) 173 self.sessions[name] = value 174 175 # find all forms and formkeys in page 176 self.forms = {} 177 for match in FORM_REGEX.finditer(to_native(self.text)): 178 self.forms[match.group('formname')] = match.group('formkey') 179 180 # log this request 181 self.history.append((self.method, self.url, self.status, self.time)) 182 183 184 def test_web2py_registration_and_login(): 185 # from gluon.contrib.webclient import WebClient 186 # start a web2py instance for testing 187 188 client = WebClient('http://127.0.0.1:8000/welcome/default/') 189 client.get('index') 190 191 # register 192 data = dict(first_name='Homer', 193 last_name='Simpson', 194 email='homer@web2py.com', 195 password='test', 196 password_two='test', 197 _formname='register') 198 client.post('user/register', data=data) 199 200 # logout 201 client.get('user/logout') 202 203 # login 204 data = dict(email='homer@web2py.com', 205 password='test', 206 _formname='login') 207 client.post('user/login', data=data) 208 209 # check registration and login were successful 210 client.get('user/profile') 211 assert 'Welcome Homer' in client.text 212 213 # print some variables 214 print('\nsessions:\n', client.sessions) 215 print('\nheaders:\n', client.headers) 216 print('\ncookies:\n', 
client.cookies) 217 print('\nforms:\n', client.forms) 218 print() 219 for method, url, status, t in client.history: 220 print(method, url, status, t) 221 222 if __name__ == '__main__': 223 test_web2py_registration_and_login() 224 [end of gluon/contrib/webclient.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gluon/contrib/webclient.py b/gluon/contrib/webclient.py --- a/gluon/contrib/webclient.py +++ b/gluon/contrib/webclient.py @@ -121,7 +121,7 @@ self.method = 'POST' if method=='auto' else method # if there is only one form, set _formname automatically if not '_formname' in data and len(self.forms) == 1: - data['_formname'] = self.forms.keys()[0] + data['_formname'] = next(iter(self.forms.keys())) # Use the first key # if there is no formkey but it is known, set it if '_formname' in data and not '_formkey' in data and \
{"golden_diff": "diff --git a/gluon/contrib/webclient.py b/gluon/contrib/webclient.py\n--- a/gluon/contrib/webclient.py\n+++ b/gluon/contrib/webclient.py\n@@ -121,7 +121,7 @@\n self.method = 'POST' if method=='auto' else method\n # if there is only one form, set _formname automatically\n if not '_formname' in data and len(self.forms) == 1:\n- data['_formname'] = self.forms.keys()[0]\n+ data['_formname'] = next(iter(self.forms.keys())) # Use the first key\n \n # if there is no formkey but it is known, set it\n if '_formname' in data and not '_formkey' in data and \\\n", "issue": "Python 3 compatibility fix\nhttps://github.com/web2py/web2py/blob/master/gluon/contrib/webclient.py#L124 should be `data['_formname'] = list(self.forms.keys())[0]` to avoid `TypeError: 'dict_keys' object does not support indexing` when this is executing under Python 3.\n", "before_files": [{"content": "\"\"\"\nDeveloped by Massimo Di Pierro\nReleased under the web2py license (LGPL)\n\nIt an interface on top of urllib2 which simplifies scripting of http requests\nmostly for testing purposes\n\n- customizable\n- supports basic auth\n- supports cookies\n- supports session cookies (tested with web2py sessions)\n- detects broken session\n- detects web2py form postbacks and handles formname and formkey\n- detects web2py tickets\n\nSome examples at the bottom.\n\"\"\"\nfrom __future__ import print_function\nfrom gluon._compat import urllib2, cookielib, iteritems, to_native, urlencode, to_bytes\nimport re\nimport time\n\n\nDEFAULT_HEADERS = {\n 'user-agent': 'Mozilla/4.0', # some servers are picky\n 'accept-language': 'en',\n}\n\nFORM_REGEX = re.compile('(\\<input name\\=\"_formkey\" type\\=\"hidden\" value\\=\"(?P<formkey>.+?)\" \\/\\>)?\\<input name\\=\"_formname\" type\\=\"hidden\" value\\=\"(?P<formname>.+?)\" \\/\\>')\n\nSESSION_REGEX = 'session_id_(?P<name>.+)'\n\n\nclass WebClient(object):\n\n def __init__(self,\n app='',\n postbacks=True,\n default_headers=DEFAULT_HEADERS,\n session_regex=SESSION_REGEX):\n self.app = app\n self.postbacks = postbacks\n self.forms = {}\n self.history = []\n self.cookies = {}\n self.cookiejar = cookielib.CookieJar()\n self.default_headers = default_headers\n self.sessions = {}\n self.session_regex = session_regex and re.compile(session_regex)\n self.headers = {}\n\n def _parse_headers_in_cookies(self):\n self.cookies = {}\n if 'set-cookie' in self.headers:\n for item in self.headers['set-cookie'].split(','):\n cookie = item[:item.find(';')]\n pos = cookie.find('=')\n key = cookie[:pos]\n value = cookie[pos+1:]\n self.cookies[key.strip()] = value.strip()\n\n def get(self, url, cookies=None, headers=None, auth=None):\n return self.post(url, data=None, cookies=cookies,\n headers=headers, method='GET')\n\n def post(self, url, data=None, cookies=None,\n headers=None, auth=None, method='auto'):\n self.url = self.app + url\n\n # if this POST form requires a postback do it\n if data and '_formname' in data and self.postbacks and \\\n self.history and self.history[-1][1] != self.url:\n # to bypass the web2py CSRF need to get formkey\n # before submitting the form\n self.get(url, cookies=cookies, headers=headers, auth=auth)\n\n # unless cookies are specified, recycle cookies\n if cookies is None:\n cookies = self.cookies\n cookies = cookies or {}\n headers = headers or {}\n\n args = [\n urllib2.HTTPCookieProcessor(self.cookiejar),\n urllib2.HTTPHandler(debuglevel=0)\n ]\n # if required do basic auth\n if auth:\n auth_handler = urllib2.HTTPBasicAuthHandler()\n auth_handler.add_password(**auth)\n 
args.append(auth_handler)\n\n opener = urllib2.build_opener(*args)\n\n # copy headers from dict to list of key,value\n headers_list = []\n for key, value in iteritems(self.default_headers):\n if not key in headers:\n headers[key] = value\n for key, value in iteritems(headers):\n if isinstance(value, (list, tuple)):\n for v in value:\n headers_list.append((key, v))\n else:\n headers_list.append((key, value))\n\n # move cookies to headers\n for key, value in iteritems(cookies):\n headers_list.append(('Cookie', '%s=%s' % (key, value)))\n\n # add headers to request\n for key, value in headers_list:\n opener.addheaders.append((key, str(value)))\n\n # assume everything is ok and make http request\n error = None\n try:\n if isinstance(data, str):\n self.method = 'POST' if method=='auto' else method\n elif isinstance(data, dict):\n self.method = 'POST' if method=='auto' else method\n # if there is only one form, set _formname automatically\n if not '_formname' in data and len(self.forms) == 1:\n data['_formname'] = self.forms.keys()[0]\n\n # if there is no formkey but it is known, set it\n if '_formname' in data and not '_formkey' in data and \\\n data['_formname'] in self.forms:\n data['_formkey'] = self.forms[data['_formname']]\n\n # time the POST request\n data = urlencode(data, doseq=True)\n else:\n self.method = 'GET' if method=='auto' else method\n data = None\n t0 = time.time()\n self.response = opener.open(self.url, to_bytes(data))\n self.time = time.time() - t0\n except urllib2.HTTPError as er:\n error = er\n # catch HTTP errors\n self.time = time.time() - t0\n self.response = er\n\n if hasattr(self.response, 'getcode'):\n self.status = self.response.getcode()\n else:#python2.5\n self.status = None\n\n self.text = to_native(self.response.read())\n # In PY3 self.response.headers are case sensitive\n self.headers = dict()\n for h in self.response.headers:\n self.headers[h.lower()] = self.response.headers[h]\n\n # treat web2py tickets as special types of errors\n if error is not None:\n if 'web2py_error' in self.headers:\n raise RuntimeError(self.headers['web2py_error'])\n else:\n raise error\n\n self._parse_headers_in_cookies()\n\n # check is a new session id has been issued, symptom of broken session\n if self.session_regex is not None:\n for cookie, value in iteritems(self.cookies):\n match = self.session_regex.match(cookie)\n if match:\n name = match.group('name')\n if name in self.sessions and self.sessions[name] != value:\n print(RuntimeError('Changed session ID %s' % name))\n self.sessions[name] = value\n\n # find all forms and formkeys in page\n self.forms = {}\n for match in FORM_REGEX.finditer(to_native(self.text)):\n self.forms[match.group('formname')] = match.group('formkey')\n\n # log this request\n self.history.append((self.method, self.url, self.status, self.time))\n\n\ndef test_web2py_registration_and_login():\n # from gluon.contrib.webclient import WebClient\n # start a web2py instance for testing\n\n client = WebClient('http://127.0.0.1:8000/welcome/default/')\n client.get('index')\n\n # register\n data = dict(first_name='Homer',\n last_name='Simpson',\n email='homer@web2py.com',\n password='test',\n password_two='test',\n _formname='register')\n client.post('user/register', data=data)\n\n # logout\n client.get('user/logout')\n\n # login\n data = dict(email='homer@web2py.com',\n password='test',\n _formname='login')\n client.post('user/login', data=data)\n\n # check registration and login were successful\n client.get('user/profile')\n assert 'Welcome Homer' in 
client.text\n\n # print some variables\n print('\\nsessions:\\n', client.sessions)\n print('\\nheaders:\\n', client.headers)\n print('\\ncookies:\\n', client.cookies)\n print('\\nforms:\\n', client.forms)\n print()\n for method, url, status, t in client.history:\n print(method, url, status, t)\n\nif __name__ == '__main__':\n test_web2py_registration_and_login()\n", "path": "gluon/contrib/webclient.py"}]}
2,924
174
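
The web2py record is a one-line Python 3 port: `dict.keys()` returns a view that cannot be indexed. A tiny self-contained illustration of the failure and of the two possible fixes follows; the `forms` dict here is only a stand-in for `WebClient.forms`.

```python
# Minimal illustration of the Python 3 failure fixed above: dict.keys()
# is a view, not a list, so it cannot be indexed.
forms = {"login": "some-formkey"}

try:
    first = forms.keys()[0]              # fine on Python 2, TypeError on Python 3
except TypeError as exc:
    print("Python 3 raises:", exc)       # exact wording varies across 3.x versions

first = next(iter(forms.keys()))         # fix applied in the patch: no copy made
assert first == "login"

first = list(forms.keys())[0]            # fix suggested in the issue report
assert first == "login"
```
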
gh_patches_debug_36469
rasdani/github-patches
git_diff
rlworkgroup__garage-861
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Upgrade pycma so that garage can install without numpy The current version of pycma we use requires numpy to run its setup.py. This breaks most automated install processes. Later versions of pycma don't have this problem, but they have an API change which requires updating `garage.np.algos.cmaes`. </issue> <code> [start of setup.py] 1 """setuptools based setup module.""" 2 from setuptools import find_packages 3 from setuptools import setup 4 5 try: 6 # pylint: disable=unused-import 7 import numpy # noqa: F401 8 except ImportError: 9 raise RuntimeError( 10 'garage requires numpy in the environment to install. ' 11 'Please install numpy==1.14.5 and try again. See ' 12 'https://github.com/rlworkgroup/garage/issues/800 for more info.') 13 14 TF_VERSION = '<1.15,>=1.14.0' 15 GYM_VERSION = '==0.12.4' 16 17 # Required dependencies 18 required = [ 19 # Please keep alphabetized 20 'akro==0.0.6', 21 'cached_property', 22 'click', 23 'cloudpickle', 24 'cma==1.1.06', 25 'dowel==0.0.2', 26 'gym[atari,box2d,classic_control]' + GYM_VERSION, 27 'joblib<0.13,>=0.12', 28 'matplotlib', 29 'numpy==1.14.5', 30 'psutil', 31 # Pyglet 1.4.0 introduces some api change which breaks some 32 # gym environments 33 # See: https://github.com/openai/gym/issues/1588 34 'pyglet<1.4.0,>=1.3.0', 35 'pyprind', 36 'python-dateutil', 37 'torch==1.1.0', 38 'ray', 39 'scikit-image', 40 'scipy', 41 'tensorflow' + TF_VERSION, 42 'tensorflow-probability<0.8.0,>=0.7.0', # for tensorflow 1.12 43 'torchvision==0.3.0' 44 ] 45 46 # Dependencies for optional features 47 extras = {} 48 49 extras['mujoco'] = [ 50 'mujoco-py<2.1,>=2.0', 51 'gym[all]' + GYM_VERSION, 52 ] 53 54 extras['dm_control'] = [ 55 # dm_control throws an error during install about not being able to 56 # find a build dependency (absl-py). Later pip executes the `install` 57 # command again and the install succeeds because absl-py has been 58 # installed. This is stupid, but harmless. 
59 'dm_control @ https://api.github.com/repos/deepmind/dm_control/tarball/7a36377879c57777e5d5b4da5aae2cd2a29b607a', # noqa: E501 60 ] 61 62 extras['all'] = list(set(sum(extras.values(), []))) 63 64 # dependencies for using gpu, not included in all 65 extras['gpu'] = ['tensorflow-gpu' + TF_VERSION] 66 67 # Development dependencies (*not* included in "all") 68 extras['dev'] = [ 69 # Please keep alphabetized 70 'baselines @ https://api.github.com/repos/openai/baselines/tarball/f2729693253c0ef4d4086231d36e0a4307ec1cb3', # noqa: E501 71 'flake8', 72 'flake8-docstrings==1.3.0', 73 'flake8-import-order', 74 'pandas', 75 'pep8-naming==0.7.0', 76 'pre-commit', 77 # pydocstyle 4.0.0 breaks flake8-docstrings 1.3.0 78 # See https://gitlab.com/pycqa/flake8-docstrings/issues/36 79 'pydocstyle<4.0.0', 80 'pylint==1.9.2', 81 'pytest>=3.6', # Required for pytest-cov on Python 3.6 82 'pytest-cov', 83 'pytest-xdist', 84 'recommonmark', 85 'sphinx', 86 'sphinx_rtd_theme', 87 'yapf==0.28.0', 88 ] 89 90 with open('README.md') as f: 91 readme = f.read() 92 93 # Get the package version dynamically 94 with open('VERSION') as v: 95 version = v.read().strip() 96 97 setup( 98 name='garage', 99 version=version, 100 author='Reinforcement Learning Working Group', 101 description='A framework for reproducible reinforcement learning research', 102 url='https://github.com/rlworkgroup/garage', 103 packages=find_packages(where='src'), 104 package_dir={'': 'src'}, 105 scripts=['scripts/garage'], 106 python_requires='>=3.5', 107 install_requires=required, 108 extras_require=extras, 109 license='MIT', 110 long_description=readme, 111 long_description_content_type='text/markdown', 112 classifiers=[ 113 'Development Status :: 4 - Beta', 114 'Intended Audience :: Developers', 115 'Intended Audience :: Education', 116 'Intended Audience :: Science/Research', 117 'License :: OSI Approved :: MIT License', 118 'Programming Language :: Python :: 3.5', 119 'Programming Language :: Python :: 3.6', 120 'Programming Language :: Python :: 3.7', 121 'Programming Language :: Python :: 3 :: Only', 122 'Topic :: Scientific/Engineering :: Artificial Intelligence', 123 'Topic :: Scientific/Engineering :: Mathematics', 124 'Topic :: Software Development :: Libraries', 125 ], 126 ) 127 [end of setup.py] [start of src/garage/np/algos/cma_es.py] 1 """Covariance Matrix Adaptation Evolution Strategy.""" 2 import cma 3 from dowel import logger, tabular 4 import numpy as np 5 6 from garage.np.algos import BatchPolopt 7 8 9 class CMAES(BatchPolopt): 10 """Covariance Matrix Adaptation Evolution Strategy. 11 12 Note: 13 The CMA-ES method can hardly learn a successful policy even for 14 simple task. It is still maintained here only for consistency with 15 original rllab paper. 16 17 Args: 18 env_spec (garage.envs.EnvSpec): Environment specification. 19 policy (garage.np.policies.Policy): Action policy. 20 baseline (garage.np.baselines.Baseline): Baseline for GAE 21 (Generalized Advantage Estimation). 22 n_samples (int): Number of policies sampled in one epoch. 23 discount (float): Environment reward discount. 24 max_path_length (int): Maximum length of a single rollout. 25 sigma0 (float): Initial std for param distribution. 
26 27 """ 28 29 def __init__(self, 30 env_spec, 31 policy, 32 baseline, 33 n_samples, 34 discount=0.99, 35 max_path_length=500, 36 sigma0=1.): 37 super().__init__(policy, baseline, discount, max_path_length, 38 n_samples) 39 self.env_spec = env_spec 40 self.policy = policy 41 42 self.sigma0 = sigma0 43 44 def _sample_params(self): 45 return self.es.ask(self.n_samples) 46 47 def train(self, runner): 48 """Initialize variables and start training. 49 50 Args: 51 runner (LocalRunner): LocalRunner is passed to give algorithm 52 the access to runner.step_epochs(), which provides services 53 such as snapshotting and sampler control. 54 55 Returns: 56 The average return in last epoch cycle. 57 58 """ 59 init_mean = self.policy.get_param_values() 60 self.es = cma.CMAEvolutionStrategy(init_mean, self.sigma0) 61 self.all_params = self._sample_params() 62 self.cur_params = self.all_params[0] 63 self.policy.set_param_values(self.cur_params) 64 self.all_returns = [] 65 66 return super().train(runner) 67 68 def train_once(self, itr, paths): 69 """Perform one step of policy optimization given one batch of samples. 70 71 Args: 72 itr (int): Iteration number. 73 paths (list[dict]): A list of collected paths. 74 75 """ 76 paths = self.process_samples(itr, paths) 77 78 epoch = itr // self.n_samples 79 i_sample = itr - epoch * self.n_samples 80 81 tabular.record('Epoch', epoch) 82 tabular.record('# Sample', i_sample) 83 84 rtn = paths['average_return'] 85 self.all_returns.append(paths['average_return']) 86 87 if (itr + 1) % self.n_samples == 0: 88 avg_rtns = np.array(self.all_returns) 89 self.es.tell(self.all_params, -avg_rtns) 90 self.policy.set_param_values(self.es.result()[0]) 91 92 # Clear for next epoch 93 rtn = max(self.all_returns) 94 self.all_returns.clear() 95 self.all_params = self._sample_params() 96 97 self.cur_params = self.all_params[(i_sample + 1) % self.n_samples] 98 self.policy.set_param_values(self.cur_params) 99 100 logger.log(tabular) 101 return rtn 102 [end of src/garage/np/algos/cma_es.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -2,15 +2,6 @@ from setuptools import find_packages from setuptools import setup -try: - # pylint: disable=unused-import - import numpy # noqa: F401 -except ImportError: - raise RuntimeError( - 'garage requires numpy in the environment to install. ' - 'Please install numpy==1.14.5 and try again. See ' - 'https://github.com/rlworkgroup/garage/issues/800 for more info.') - TF_VERSION = '<1.15,>=1.14.0' GYM_VERSION = '==0.12.4' @@ -21,12 +12,12 @@ 'cached_property', 'click', 'cloudpickle', - 'cma==1.1.06', + 'cma==2.7.0', 'dowel==0.0.2', 'gym[atari,box2d,classic_control]' + GYM_VERSION, 'joblib<0.13,>=0.12', 'matplotlib', - 'numpy==1.14.5', + 'numpy>=1.14.5', 'psutil', # Pyglet 1.4.0 introduces some api change which breaks some # gym environments diff --git a/src/garage/np/algos/cma_es.py b/src/garage/np/algos/cma_es.py --- a/src/garage/np/algos/cma_es.py +++ b/src/garage/np/algos/cma_es.py @@ -42,7 +42,7 @@ self.sigma0 = sigma0 def _sample_params(self): - return self.es.ask(self.n_samples) + return self.es.ask() def train(self, runner): """Initialize variables and start training. @@ -57,7 +57,8 @@ """ init_mean = self.policy.get_param_values() - self.es = cma.CMAEvolutionStrategy(init_mean, self.sigma0) + self.es = cma.CMAEvolutionStrategy(init_mean, self.sigma0, + {'popsize': self.n_samples}) self.all_params = self._sample_params() self.cur_params = self.all_params[0] self.policy.set_param_values(self.cur_params) @@ -87,7 +88,7 @@ if (itr + 1) % self.n_samples == 0: avg_rtns = np.array(self.all_returns) self.es.tell(self.all_params, -avg_rtns) - self.policy.set_param_values(self.es.result()[0]) + self.policy.set_param_values(self.es.best.get()[0]) # Clear for next epoch rtn = max(self.all_returns)
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -2,15 +2,6 @@\n from setuptools import find_packages\n from setuptools import setup\n \n-try:\n- # pylint: disable=unused-import\n- import numpy # noqa: F401\n-except ImportError:\n- raise RuntimeError(\n- 'garage requires numpy in the environment to install. '\n- 'Please install numpy==1.14.5 and try again. See '\n- 'https://github.com/rlworkgroup/garage/issues/800 for more info.')\n-\n TF_VERSION = '<1.15,>=1.14.0'\n GYM_VERSION = '==0.12.4'\n \n@@ -21,12 +12,12 @@\n 'cached_property',\n 'click',\n 'cloudpickle',\n- 'cma==1.1.06',\n+ 'cma==2.7.0',\n 'dowel==0.0.2',\n 'gym[atari,box2d,classic_control]' + GYM_VERSION,\n 'joblib<0.13,>=0.12',\n 'matplotlib',\n- 'numpy==1.14.5',\n+ 'numpy>=1.14.5',\n 'psutil',\n # Pyglet 1.4.0 introduces some api change which breaks some\n # gym environments\ndiff --git a/src/garage/np/algos/cma_es.py b/src/garage/np/algos/cma_es.py\n--- a/src/garage/np/algos/cma_es.py\n+++ b/src/garage/np/algos/cma_es.py\n@@ -42,7 +42,7 @@\n self.sigma0 = sigma0\n \n def _sample_params(self):\n- return self.es.ask(self.n_samples)\n+ return self.es.ask()\n \n def train(self, runner):\n \"\"\"Initialize variables and start training.\n@@ -57,7 +57,8 @@\n \n \"\"\"\n init_mean = self.policy.get_param_values()\n- self.es = cma.CMAEvolutionStrategy(init_mean, self.sigma0)\n+ self.es = cma.CMAEvolutionStrategy(init_mean, self.sigma0,\n+ {'popsize': self.n_samples})\n self.all_params = self._sample_params()\n self.cur_params = self.all_params[0]\n self.policy.set_param_values(self.cur_params)\n@@ -87,7 +88,7 @@\n if (itr + 1) % self.n_samples == 0:\n avg_rtns = np.array(self.all_returns)\n self.es.tell(self.all_params, -avg_rtns)\n- self.policy.set_param_values(self.es.result()[0])\n+ self.policy.set_param_values(self.es.best.get()[0])\n \n # Clear for next epoch\n rtn = max(self.all_returns)\n", "issue": "Upgrade pycma so that garage can install without numpy\nThe current version of pycma we use requires numpy to run its setup.py. This breaks most automated install processes. Later versions of pycma don't have this problem, but they have an API change which requires updating `garage.np.algos.cmaes`.\n", "before_files": [{"content": "\"\"\"setuptools based setup module.\"\"\"\nfrom setuptools import find_packages\nfrom setuptools import setup\n\ntry:\n # pylint: disable=unused-import\n import numpy # noqa: F401\nexcept ImportError:\n raise RuntimeError(\n 'garage requires numpy in the environment to install. '\n 'Please install numpy==1.14.5 and try again. 
See '\n 'https://github.com/rlworkgroup/garage/issues/800 for more info.')\n\nTF_VERSION = '<1.15,>=1.14.0'\nGYM_VERSION = '==0.12.4'\n\n# Required dependencies\nrequired = [\n # Please keep alphabetized\n 'akro==0.0.6',\n 'cached_property',\n 'click',\n 'cloudpickle',\n 'cma==1.1.06',\n 'dowel==0.0.2',\n 'gym[atari,box2d,classic_control]' + GYM_VERSION,\n 'joblib<0.13,>=0.12',\n 'matplotlib',\n 'numpy==1.14.5',\n 'psutil',\n # Pyglet 1.4.0 introduces some api change which breaks some\n # gym environments\n # See: https://github.com/openai/gym/issues/1588\n 'pyglet<1.4.0,>=1.3.0',\n 'pyprind',\n 'python-dateutil',\n 'torch==1.1.0',\n 'ray',\n 'scikit-image',\n 'scipy',\n 'tensorflow' + TF_VERSION,\n 'tensorflow-probability<0.8.0,>=0.7.0', # for tensorflow 1.12\n 'torchvision==0.3.0'\n]\n\n# Dependencies for optional features\nextras = {}\n\nextras['mujoco'] = [\n 'mujoco-py<2.1,>=2.0',\n 'gym[all]' + GYM_VERSION,\n]\n\nextras['dm_control'] = [\n # dm_control throws an error during install about not being able to\n # find a build dependency (absl-py). Later pip executes the `install`\n # command again and the install succeeds because absl-py has been\n # installed. This is stupid, but harmless.\n 'dm_control @ https://api.github.com/repos/deepmind/dm_control/tarball/7a36377879c57777e5d5b4da5aae2cd2a29b607a', # noqa: E501\n]\n\nextras['all'] = list(set(sum(extras.values(), [])))\n\n# dependencies for using gpu, not included in all\nextras['gpu'] = ['tensorflow-gpu' + TF_VERSION]\n\n# Development dependencies (*not* included in \"all\")\nextras['dev'] = [\n # Please keep alphabetized\n 'baselines @ https://api.github.com/repos/openai/baselines/tarball/f2729693253c0ef4d4086231d36e0a4307ec1cb3', # noqa: E501\n 'flake8',\n 'flake8-docstrings==1.3.0',\n 'flake8-import-order',\n 'pandas',\n 'pep8-naming==0.7.0',\n 'pre-commit',\n # pydocstyle 4.0.0 breaks flake8-docstrings 1.3.0\n # See https://gitlab.com/pycqa/flake8-docstrings/issues/36\n 'pydocstyle<4.0.0',\n 'pylint==1.9.2',\n 'pytest>=3.6', # Required for pytest-cov on Python 3.6\n 'pytest-cov',\n 'pytest-xdist',\n 'recommonmark',\n 'sphinx',\n 'sphinx_rtd_theme',\n 'yapf==0.28.0',\n]\n\nwith open('README.md') as f:\n readme = f.read()\n\n# Get the package version dynamically\nwith open('VERSION') as v:\n version = v.read().strip()\n\nsetup(\n name='garage',\n version=version,\n author='Reinforcement Learning Working Group',\n description='A framework for reproducible reinforcement learning research',\n url='https://github.com/rlworkgroup/garage',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n python_requires='>=3.5',\n install_requires=required,\n extras_require=extras,\n license='MIT',\n long_description=readme,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py"}, {"content": "\"\"\"Covariance Matrix Adaptation Evolution Strategy.\"\"\"\nimport cma\nfrom dowel import logger, tabular\nimport numpy as np\n\nfrom 
garage.np.algos import BatchPolopt\n\n\nclass CMAES(BatchPolopt):\n \"\"\"Covariance Matrix Adaptation Evolution Strategy.\n\n Note:\n The CMA-ES method can hardly learn a successful policy even for\n simple task. It is still maintained here only for consistency with\n original rllab paper.\n\n Args:\n env_spec (garage.envs.EnvSpec): Environment specification.\n policy (garage.np.policies.Policy): Action policy.\n baseline (garage.np.baselines.Baseline): Baseline for GAE\n (Generalized Advantage Estimation).\n n_samples (int): Number of policies sampled in one epoch.\n discount (float): Environment reward discount.\n max_path_length (int): Maximum length of a single rollout.\n sigma0 (float): Initial std for param distribution.\n\n \"\"\"\n\n def __init__(self,\n env_spec,\n policy,\n baseline,\n n_samples,\n discount=0.99,\n max_path_length=500,\n sigma0=1.):\n super().__init__(policy, baseline, discount, max_path_length,\n n_samples)\n self.env_spec = env_spec\n self.policy = policy\n\n self.sigma0 = sigma0\n\n def _sample_params(self):\n return self.es.ask(self.n_samples)\n\n def train(self, runner):\n \"\"\"Initialize variables and start training.\n\n Args:\n runner (LocalRunner): LocalRunner is passed to give algorithm\n the access to runner.step_epochs(), which provides services\n such as snapshotting and sampler control.\n\n Returns:\n The average return in last epoch cycle.\n\n \"\"\"\n init_mean = self.policy.get_param_values()\n self.es = cma.CMAEvolutionStrategy(init_mean, self.sigma0)\n self.all_params = self._sample_params()\n self.cur_params = self.all_params[0]\n self.policy.set_param_values(self.cur_params)\n self.all_returns = []\n\n return super().train(runner)\n\n def train_once(self, itr, paths):\n \"\"\"Perform one step of policy optimization given one batch of samples.\n\n Args:\n itr (int): Iteration number.\n paths (list[dict]): A list of collected paths.\n\n \"\"\"\n paths = self.process_samples(itr, paths)\n\n epoch = itr // self.n_samples\n i_sample = itr - epoch * self.n_samples\n\n tabular.record('Epoch', epoch)\n tabular.record('# Sample', i_sample)\n\n rtn = paths['average_return']\n self.all_returns.append(paths['average_return'])\n\n if (itr + 1) % self.n_samples == 0:\n avg_rtns = np.array(self.all_returns)\n self.es.tell(self.all_params, -avg_rtns)\n self.policy.set_param_values(self.es.result()[0])\n\n # Clear for next epoch\n rtn = max(self.all_returns)\n self.all_returns.clear()\n self.all_params = self._sample_params()\n\n self.cur_params = self.all_params[(i_sample + 1) % self.n_samples]\n self.policy.set_param_values(self.cur_params)\n\n logger.log(tabular)\n return rtn\n", "path": "src/garage/np/algos/cma_es.py"}]}
3,009
646
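
The garage record upgrades pycma from 1.x to 2.x, where the population size moves from `ask(n)` into the options dict and the best point is read via `es.best.get()`. Below is a small stand-alone sketch of that 2.x ask/tell loop on a toy objective; the `sphere` function, dimensions, and iteration counts are arbitrary illustration values, not garage code.

```python
# Stand-alone sketch of the pycma 2.x ask/tell loop the patch migrates to:
# popsize is an option, ask() takes no count, and the best point comes from
# es.best.get(). Toy objective for illustration only.
import cma
import numpy as np


def sphere(x):
    return float(np.sum(np.asarray(x) ** 2))


n_samples = 8
es = cma.CMAEvolutionStrategy(np.zeros(5), 0.5,        # initial mean and sigma0
                              {'popsize': n_samples})  # 2.x: popsize is an option

for _ in range(20):
    candidates = es.ask()                              # yields popsize candidates
    es.tell(candidates, [sphere(c) for c in candidates])

best_params = es.best.get()[0]                         # 2.x replacement for es.result()[0]
print("best cost:", sphere(best_params))
```
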
gh_patches_debug_16592
rasdani/github-patches
git_diff
pyg-team__pytorch_geometric-2691
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PointCNN dilated KNN might select less than `K` neighbours ## 🐛 Bug In the current X-Conv implementation, I can see the following: https://github.com/rusty1s/pytorch_geometric/blob/e8e004439e3204a7b888a21e508c45d166c5817c/torch_geometric/nn/conv/x_conv.py#L130-L137 As, `torch.randint` would sample with replacement, there's a chance that the number of actual neighbours can be less than `K`. I am thinking we can fix this using something like: ``` indices = torch.randperm(K * dil)[:K] ``` </issue> <code> [start of torch_geometric/nn/conv/x_conv.py] 1 from typing import Optional 2 3 from math import ceil 4 5 import torch 6 from torch import Tensor 7 from torch.nn import Sequential as S, Linear as L, BatchNorm1d as BN 8 from torch.nn import ELU, Conv1d 9 from torch_geometric.nn import Reshape 10 11 from ..inits import reset 12 13 try: 14 from torch_cluster import knn_graph 15 except ImportError: 16 knn_graph = None 17 18 19 class XConv(torch.nn.Module): 20 r"""The convolutional operator on :math:`\mathcal{X}`-transformed points 21 from the `"PointCNN: Convolution On X-Transformed Points" 22 <https://arxiv.org/abs/1801.07791>`_ paper 23 24 .. math:: 25 \mathbf{x}^{\prime}_i = \mathrm{Conv}\left(\mathbf{K}, 26 \gamma_{\mathbf{\Theta}}(\mathbf{P}_i - \mathbf{p}_i) \times 27 \left( h_\mathbf{\Theta}(\mathbf{P}_i - \mathbf{p}_i) \, \Vert \, 28 \mathbf{x}_i \right) \right), 29 30 where :math:`\mathbf{K}` and :math:`\mathbf{P}_i` denote the trainable 31 filter and neighboring point positions of :math:`\mathbf{x}_i`, 32 respectively. 33 :math:`\gamma_{\mathbf{\Theta}}` and :math:`h_{\mathbf{\Theta}}` describe 34 neural networks, *i.e.* MLPs, where :math:`h_{\mathbf{\Theta}}` 35 individually lifts each point into a higher-dimensional space, and 36 :math:`\gamma_{\mathbf{\Theta}}` computes the :math:`\mathcal{X}`- 37 transformation matrix based on *all* points in a neighborhood. 38 39 Args: 40 in_channels (int): Size of each input sample. 41 out_channels (int): Size of each output sample. 42 dim (int): Point cloud dimensionality. 43 kernel_size (int): Size of the convolving kernel, *i.e.* number of 44 neighbors including self-loops. 45 hidden_channels (int, optional): Output size of 46 :math:`h_{\mathbf{\Theta}}`, *i.e.* dimensionality of lifted 47 points. If set to :obj:`None`, will be automatically set to 48 :obj:`in_channels / 4`. (default: :obj:`None`) 49 dilation (int, optional): The factor by which the neighborhood is 50 extended, from which :obj:`kernel_size` neighbors are then 51 uniformly sampled. Can be interpreted as the dilation rate of 52 classical convolutional operators. (default: :obj:`1`) 53 bias (bool, optional): If set to :obj:`False`, the layer will not learn 54 an additive bias. (default: :obj:`True`) 55 num_workers (int): Number of workers to use for k-NN computation. 56 Has no effect in case :obj:`batch` is not :obj:`None`, or the input 57 lies on the GPU. 
(default: :obj:`1`) 58 """ 59 def __init__(self, in_channels: int, out_channels: int, dim: int, 60 kernel_size: int, hidden_channels: Optional[int] = None, 61 dilation: int = 1, bias: bool = True, num_workers: int = 1): 62 super(XConv, self).__init__() 63 64 if knn_graph is None: 65 raise ImportError('`XConv` requires `torch-cluster`.') 66 67 self.in_channels = in_channels 68 if hidden_channels is None: 69 hidden_channels = in_channels // 4 70 assert hidden_channels > 0 71 self.hidden_channels = hidden_channels 72 self.out_channels = out_channels 73 self.dim = dim 74 self.kernel_size = kernel_size 75 self.dilation = dilation 76 self.num_workers = num_workers 77 78 C_in, C_delta, C_out = in_channels, hidden_channels, out_channels 79 D, K = dim, kernel_size 80 81 self.mlp1 = S( 82 L(dim, C_delta), 83 ELU(), 84 BN(C_delta), 85 L(C_delta, C_delta), 86 ELU(), 87 BN(C_delta), 88 Reshape(-1, K, C_delta), 89 ) 90 91 self.mlp2 = S( 92 L(D * K, K**2), 93 ELU(), 94 BN(K**2), 95 Reshape(-1, K, K), 96 Conv1d(K, K**2, K, groups=K), 97 ELU(), 98 BN(K**2), 99 Reshape(-1, K, K), 100 Conv1d(K, K**2, K, groups=K), 101 BN(K**2), 102 Reshape(-1, K, K), 103 ) 104 105 C_in = C_in + C_delta 106 depth_multiplier = int(ceil(C_out / C_in)) 107 self.conv = S( 108 Conv1d(C_in, C_in * depth_multiplier, K, groups=C_in), 109 Reshape(-1, C_in * depth_multiplier), 110 L(C_in * depth_multiplier, C_out, bias=bias), 111 ) 112 113 self.reset_parameters() 114 115 def reset_parameters(self): 116 reset(self.mlp1) 117 reset(self.mlp2) 118 reset(self.conv) 119 120 def forward(self, x: Tensor, pos: Tensor, batch: Optional[Tensor] = None): 121 """""" 122 pos = pos.unsqueeze(-1) if pos.dim() == 1 else pos 123 (N, D), K = pos.size(), self.kernel_size 124 125 edge_index = knn_graph(pos, K * self.dilation, batch, loop=True, 126 flow='target_to_source', 127 num_workers=self.num_workers) 128 row, col = edge_index[0], edge_index[1] 129 130 if self.dilation > 1: 131 dil = self.dilation 132 index = torch.randint(K * dil, (N, K), dtype=torch.long, 133 device=row.device) 134 arange = torch.arange(N, dtype=torch.long, device=row.device) 135 arange = arange * (K * dil) 136 index = (index + arange.view(-1, 1)).view(-1) 137 row, col = row[index], col[index] 138 139 pos = pos[col] - pos[row] 140 141 x_star = self.mlp1(pos) 142 if x is not None: 143 x = x.unsqueeze(-1) if x.dim() == 1 else x 144 x = x[col].view(N, K, self.in_channels) 145 x_star = torch.cat([x_star, x], dim=-1) 146 x_star = x_star.transpose(1, 2).contiguous() 147 148 transform_matrix = self.mlp2(pos.view(N, K * D)) 149 150 x_transformed = torch.matmul(x_star, transform_matrix) 151 152 out = self.conv(x_transformed) 153 154 return out 155 156 def __repr__(self): 157 return '{}({}, {})'.format(self.__class__.__name__, self.in_channels, 158 self.out_channels) 159 [end of torch_geometric/nn/conv/x_conv.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torch_geometric/nn/conv/x_conv.py b/torch_geometric/nn/conv/x_conv.py --- a/torch_geometric/nn/conv/x_conv.py +++ b/torch_geometric/nn/conv/x_conv.py @@ -125,16 +125,11 @@ edge_index = knn_graph(pos, K * self.dilation, batch, loop=True, flow='target_to_source', num_workers=self.num_workers) - row, col = edge_index[0], edge_index[1] if self.dilation > 1: - dil = self.dilation - index = torch.randint(K * dil, (N, K), dtype=torch.long, - device=row.device) - arange = torch.arange(N, dtype=torch.long, device=row.device) - arange = arange * (K * dil) - index = (index + arange.view(-1, 1)).view(-1) - row, col = row[index], col[index] + edge_index = edge_index[:, ::K] + + row, col = edge_index[0], edge_index[1] pos = pos[col] - pos[row]
{"golden_diff": "diff --git a/torch_geometric/nn/conv/x_conv.py b/torch_geometric/nn/conv/x_conv.py\n--- a/torch_geometric/nn/conv/x_conv.py\n+++ b/torch_geometric/nn/conv/x_conv.py\n@@ -125,16 +125,11 @@\n edge_index = knn_graph(pos, K * self.dilation, batch, loop=True,\n flow='target_to_source',\n num_workers=self.num_workers)\n- row, col = edge_index[0], edge_index[1]\n \n if self.dilation > 1:\n- dil = self.dilation\n- index = torch.randint(K * dil, (N, K), dtype=torch.long,\n- device=row.device)\n- arange = torch.arange(N, dtype=torch.long, device=row.device)\n- arange = arange * (K * dil)\n- index = (index + arange.view(-1, 1)).view(-1)\n- row, col = row[index], col[index]\n+ edge_index = edge_index[:, ::K]\n+\n+ row, col = edge_index[0], edge_index[1]\n \n pos = pos[col] - pos[row]\n", "issue": "PointCNN dilated KNN might select less than `K` neighbours \n## \ud83d\udc1b Bug\r\n\r\nIn the current X-Conv implementation, I can see the following:\r\n\r\nhttps://github.com/rusty1s/pytorch_geometric/blob/e8e004439e3204a7b888a21e508c45d166c5817c/torch_geometric/nn/conv/x_conv.py#L130-L137\r\n\r\nAs, `torch.randint` would sample with replacement, there's a chance that the number of actual neighbours can be less than `K`. I am thinking we can fix this using something like:\r\n\r\n```\r\nindices = torch.randperm(K * dil)[:K]\r\n```\n", "before_files": [{"content": "from typing import Optional\n\nfrom math import ceil\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import Sequential as S, Linear as L, BatchNorm1d as BN\nfrom torch.nn import ELU, Conv1d\nfrom torch_geometric.nn import Reshape\n\nfrom ..inits import reset\n\ntry:\n from torch_cluster import knn_graph\nexcept ImportError:\n knn_graph = None\n\n\nclass XConv(torch.nn.Module):\n r\"\"\"The convolutional operator on :math:`\\mathcal{X}`-transformed points\n from the `\"PointCNN: Convolution On X-Transformed Points\"\n <https://arxiv.org/abs/1801.07791>`_ paper\n\n .. math::\n \\mathbf{x}^{\\prime}_i = \\mathrm{Conv}\\left(\\mathbf{K},\n \\gamma_{\\mathbf{\\Theta}}(\\mathbf{P}_i - \\mathbf{p}_i) \\times\n \\left( h_\\mathbf{\\Theta}(\\mathbf{P}_i - \\mathbf{p}_i) \\, \\Vert \\,\n \\mathbf{x}_i \\right) \\right),\n\n where :math:`\\mathbf{K}` and :math:`\\mathbf{P}_i` denote the trainable\n filter and neighboring point positions of :math:`\\mathbf{x}_i`,\n respectively.\n :math:`\\gamma_{\\mathbf{\\Theta}}` and :math:`h_{\\mathbf{\\Theta}}` describe\n neural networks, *i.e.* MLPs, where :math:`h_{\\mathbf{\\Theta}}`\n individually lifts each point into a higher-dimensional space, and\n :math:`\\gamma_{\\mathbf{\\Theta}}` computes the :math:`\\mathcal{X}`-\n transformation matrix based on *all* points in a neighborhood.\n\n Args:\n in_channels (int): Size of each input sample.\n out_channels (int): Size of each output sample.\n dim (int): Point cloud dimensionality.\n kernel_size (int): Size of the convolving kernel, *i.e.* number of\n neighbors including self-loops.\n hidden_channels (int, optional): Output size of\n :math:`h_{\\mathbf{\\Theta}}`, *i.e.* dimensionality of lifted\n points. If set to :obj:`None`, will be automatically set to\n :obj:`in_channels / 4`. (default: :obj:`None`)\n dilation (int, optional): The factor by which the neighborhood is\n extended, from which :obj:`kernel_size` neighbors are then\n uniformly sampled. Can be interpreted as the dilation rate of\n classical convolutional operators. (default: :obj:`1`)\n bias (bool, optional): If set to :obj:`False`, the layer will not learn\n an additive bias. 
(default: :obj:`True`)\n num_workers (int): Number of workers to use for k-NN computation.\n Has no effect in case :obj:`batch` is not :obj:`None`, or the input\n lies on the GPU. (default: :obj:`1`)\n \"\"\"\n def __init__(self, in_channels: int, out_channels: int, dim: int,\n kernel_size: int, hidden_channels: Optional[int] = None,\n dilation: int = 1, bias: bool = True, num_workers: int = 1):\n super(XConv, self).__init__()\n\n if knn_graph is None:\n raise ImportError('`XConv` requires `torch-cluster`.')\n\n self.in_channels = in_channels\n if hidden_channels is None:\n hidden_channels = in_channels // 4\n assert hidden_channels > 0\n self.hidden_channels = hidden_channels\n self.out_channels = out_channels\n self.dim = dim\n self.kernel_size = kernel_size\n self.dilation = dilation\n self.num_workers = num_workers\n\n C_in, C_delta, C_out = in_channels, hidden_channels, out_channels\n D, K = dim, kernel_size\n\n self.mlp1 = S(\n L(dim, C_delta),\n ELU(),\n BN(C_delta),\n L(C_delta, C_delta),\n ELU(),\n BN(C_delta),\n Reshape(-1, K, C_delta),\n )\n\n self.mlp2 = S(\n L(D * K, K**2),\n ELU(),\n BN(K**2),\n Reshape(-1, K, K),\n Conv1d(K, K**2, K, groups=K),\n ELU(),\n BN(K**2),\n Reshape(-1, K, K),\n Conv1d(K, K**2, K, groups=K),\n BN(K**2),\n Reshape(-1, K, K),\n )\n\n C_in = C_in + C_delta\n depth_multiplier = int(ceil(C_out / C_in))\n self.conv = S(\n Conv1d(C_in, C_in * depth_multiplier, K, groups=C_in),\n Reshape(-1, C_in * depth_multiplier),\n L(C_in * depth_multiplier, C_out, bias=bias),\n )\n\n self.reset_parameters()\n\n def reset_parameters(self):\n reset(self.mlp1)\n reset(self.mlp2)\n reset(self.conv)\n\n def forward(self, x: Tensor, pos: Tensor, batch: Optional[Tensor] = None):\n \"\"\"\"\"\"\n pos = pos.unsqueeze(-1) if pos.dim() == 1 else pos\n (N, D), K = pos.size(), self.kernel_size\n\n edge_index = knn_graph(pos, K * self.dilation, batch, loop=True,\n flow='target_to_source',\n num_workers=self.num_workers)\n row, col = edge_index[0], edge_index[1]\n\n if self.dilation > 1:\n dil = self.dilation\n index = torch.randint(K * dil, (N, K), dtype=torch.long,\n device=row.device)\n arange = torch.arange(N, dtype=torch.long, device=row.device)\n arange = arange * (K * dil)\n index = (index + arange.view(-1, 1)).view(-1)\n row, col = row[index], col[index]\n\n pos = pos[col] - pos[row]\n\n x_star = self.mlp1(pos)\n if x is not None:\n x = x.unsqueeze(-1) if x.dim() == 1 else x\n x = x[col].view(N, K, self.in_channels)\n x_star = torch.cat([x_star, x], dim=-1)\n x_star = x_star.transpose(1, 2).contiguous()\n\n transform_matrix = self.mlp2(pos.view(N, K * D))\n\n x_transformed = torch.matmul(x_star, transform_matrix)\n\n out = self.conv(x_transformed)\n\n return out\n\n def __repr__(self):\n return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,\n self.out_channels)\n", "path": "torch_geometric/nn/conv/x_conv.py"}]}
num_tokens_prompt: 2,625
num_tokens_diff: 266
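A side note on the PointCNN record above: the issue proposes sampling the dilated neighbourhood without replacement via `torch.randperm`, while the merged diff removes the random sampling entirely and takes a strided slice of `edge_index`. The sketch below only illustrates the with/without-replacement distinction that motivates the report; the toy values of `K` and `dil` are assumptions, not taken from the record.

```python
import torch

# Toy sizes chosen for illustration; not taken from the record above.
K, dil = 4, 3
pool = K * dil  # one node's dilated neighbourhood (K * dilation candidates)

# torch.randint draws *with replacement*, so repeated indices are possible
# and the number of distinct neighbours can fall below K.
with_replacement = torch.randint(pool, (K,))

# torch.randperm draws *without replacement*, guaranteeing K distinct indices,
# which is the fix suggested in the issue text.
without_replacement = torch.randperm(pool)[:K]

print(with_replacement.unique().numel())     # can be smaller than K
print(without_replacement.unique().numel())  # always exactly K
```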
problem_id: gh_patches_debug_27532
source: rasdani/github-patches
task_type: git_diff
in_source_id: python-discord__site-432
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support deleting infractions The bot tries to delete infractions that fail to apply on the Discord side of things. This is cause it sends the infraction to our API before applying it on Discord. However, our API doesn't actually support the delete method so it returns a 405. </issue> <code> [start of pydis_site/apps/api/viewsets/bot/infraction.py] 1 from django.http.request import HttpRequest 2 from django_filters.rest_framework import DjangoFilterBackend 3 from rest_framework.decorators import action 4 from rest_framework.exceptions import ValidationError 5 from rest_framework.filters import OrderingFilter, SearchFilter 6 from rest_framework.mixins import ( 7 CreateModelMixin, 8 ListModelMixin, 9 RetrieveModelMixin 10 ) 11 from rest_framework.response import Response 12 from rest_framework.viewsets import GenericViewSet 13 14 from pydis_site.apps.api.models.bot.infraction import Infraction 15 from pydis_site.apps.api.serializers import ( 16 ExpandedInfractionSerializer, 17 InfractionSerializer 18 ) 19 20 21 class InfractionViewSet(CreateModelMixin, RetrieveModelMixin, ListModelMixin, GenericViewSet): 22 """ 23 View providing CRUD operations on infractions for Discord users. 24 25 ## Routes 26 ### GET /bot/infractions 27 Retrieve all infractions. 28 May be filtered by the query parameters. 29 30 #### Query parameters 31 - **active** `bool`: whether the infraction is still active 32 - **actor__id** `int`: snowflake of the user which applied the infraction 33 - **hidden** `bool`: whether the infraction is a shadow infraction 34 - **search** `str`: regular expression applied to the infraction's reason 35 - **type** `str`: the type of the infraction 36 - **user__id** `int`: snowflake of the user to which the infraction was applied 37 - **ordering** `str`: comma-separated sequence of fields to order the returned results 38 39 Invalid query parameters are ignored. 40 41 #### Response format 42 >>> [ 43 ... { 44 ... 'id': 5, 45 ... 'inserted_at': '2018-11-22T07:24:06.132307Z', 46 ... 'expires_at': '5018-11-20T15:52:00Z', 47 ... 'active': False, 48 ... 'user': 172395097705414656, 49 ... 'actor': 125435062127820800, 50 ... 'type': 'ban', 51 ... 'reason': 'He terk my jerb!', 52 ... 'hidden': True 53 ... } 54 ... ] 55 56 #### Status codes 57 - 200: returned on success 58 59 ### GET /bot/infractions/<id:int> 60 Retrieve a single infraction by ID. 61 62 #### Response format 63 See `GET /bot/infractions`. 64 65 #### Status codes 66 - 200: returned on success 67 - 404: if an infraction with the given `id` could not be found 68 69 ### POST /bot/infractions 70 Create a new infraction and return the created infraction. 71 Only `actor`, `type`, and `user` are required. 72 The `actor` and `user` must be users known by the site. 73 74 #### Request body 75 >>> { 76 ... 'active': False, 77 ... 'actor': 125435062127820800, 78 ... 'expires_at': '5018-11-20T15:52:00+00:00', 79 ... 'hidden': True, 80 ... 'type': 'ban', 81 ... 'reason': 'He terk my jerb!', 82 ... 'user': 172395097705414656 83 ... } 84 85 #### Response format 86 See `GET /bot/infractions`. 87 88 #### Status codes 89 - 201: returned on success 90 - 400: if a given user is unknown or a field in the request body is invalid 91 92 ### PATCH /bot/infractions/<id:int> 93 Update the infraction with the given `id` and return the updated infraction. 94 Only `active`, `reason`, and `expires_at` may be updated. 95 96 #### Request body 97 >>> { 98 ... 
'active': True, 99 ... 'expires_at': '4143-02-15T21:04:31+00:00', 100 ... 'reason': 'durka derr' 101 ... } 102 103 #### Response format 104 See `GET /bot/infractions`. 105 106 #### Status codes 107 - 200: returned on success 108 - 400: if a field in the request body is invalid or disallowed 109 - 404: if an infraction with the given `id` could not be found 110 111 ### Expanded routes 112 All routes support expansion of `user` and `actor` in responses. To use an expanded route, 113 append `/expanded` to the end of the route e.g. `GET /bot/infractions/expanded`. 114 115 #### Response format 116 See `GET /bot/users/<snowflake:int>` for the expanded formats of `user` and `actor`. Responses 117 are otherwise identical to their non-expanded counterparts. 118 """ 119 120 serializer_class = InfractionSerializer 121 queryset = Infraction.objects.all() 122 filter_backends = (DjangoFilterBackend, SearchFilter, OrderingFilter) 123 filter_fields = ('user__id', 'actor__id', 'active', 'hidden', 'type') 124 search_fields = ('$reason',) 125 frozen_fields = ('id', 'inserted_at', 'type', 'user', 'actor', 'hidden') 126 127 def partial_update(self, request: HttpRequest, *_args, **_kwargs) -> Response: 128 """Method that handles the nuts and bolts of updating an Infraction.""" 129 for field in request.data: 130 if field in self.frozen_fields: 131 raise ValidationError({field: ['This field cannot be updated.']}) 132 133 instance = self.get_object() 134 serializer = self.get_serializer(instance, data=request.data, partial=True) 135 serializer.is_valid(raise_exception=True) 136 serializer.save() 137 138 return Response(serializer.data) 139 140 @action(url_path='expanded', detail=False) 141 def list_expanded(self, *args, **kwargs) -> Response: 142 """ 143 DRF method for listing Infraction entries. 144 145 Called by the Django Rest Framework in response to the corresponding HTTP request. 146 """ 147 self.serializer_class = ExpandedInfractionSerializer 148 return self.list(*args, **kwargs) 149 150 @list_expanded.mapping.post 151 def create_expanded(self, *args, **kwargs) -> Response: 152 """ 153 DRF method for creating an Infraction. 154 155 Called by the Django Rest Framework in response to the corresponding HTTP request. 156 """ 157 self.serializer_class = ExpandedInfractionSerializer 158 return self.create(*args, **kwargs) 159 160 @action(url_path='expanded', url_name='detail-expanded', detail=True) 161 def retrieve_expanded(self, *args, **kwargs) -> Response: 162 """ 163 DRF method for retrieving a specific Infraction. 164 165 Called by the Django Rest Framework in response to the corresponding HTTP request. 166 """ 167 self.serializer_class = ExpandedInfractionSerializer 168 return self.retrieve(*args, **kwargs) 169 170 @retrieve_expanded.mapping.patch 171 def partial_update_expanded(self, *args, **kwargs) -> Response: 172 """ 173 DRF method for updating an Infraction. 174 175 Called by the Django Rest Framework in response to the corresponding HTTP request. 176 """ 177 self.serializer_class = ExpandedInfractionSerializer 178 return self.partial_update(*args, **kwargs) 179 [end of pydis_site/apps/api/viewsets/bot/infraction.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pydis_site/apps/api/viewsets/bot/infraction.py b/pydis_site/apps/api/viewsets/bot/infraction.py --- a/pydis_site/apps/api/viewsets/bot/infraction.py +++ b/pydis_site/apps/api/viewsets/bot/infraction.py @@ -5,6 +5,7 @@ from rest_framework.filters import OrderingFilter, SearchFilter from rest_framework.mixins import ( CreateModelMixin, + DestroyModelMixin, ListModelMixin, RetrieveModelMixin ) @@ -18,7 +19,13 @@ ) -class InfractionViewSet(CreateModelMixin, RetrieveModelMixin, ListModelMixin, GenericViewSet): +class InfractionViewSet( + CreateModelMixin, + RetrieveModelMixin, + ListModelMixin, + GenericViewSet, + DestroyModelMixin +): """ View providing CRUD operations on infractions for Discord users. @@ -108,6 +115,13 @@ - 400: if a field in the request body is invalid or disallowed - 404: if an infraction with the given `id` could not be found + ### DELETE /bot/infractions/<id:int> + Delete the infraction with the given `id`. + + #### Status codes + - 204: returned on success + - 404: if a infraction with the given `id` does not exist + ### Expanded routes All routes support expansion of `user` and `actor` in responses. To use an expanded route, append `/expanded` to the end of the route e.g. `GET /bot/infractions/expanded`.
{"golden_diff": "diff --git a/pydis_site/apps/api/viewsets/bot/infraction.py b/pydis_site/apps/api/viewsets/bot/infraction.py\n--- a/pydis_site/apps/api/viewsets/bot/infraction.py\n+++ b/pydis_site/apps/api/viewsets/bot/infraction.py\n@@ -5,6 +5,7 @@\n from rest_framework.filters import OrderingFilter, SearchFilter\n from rest_framework.mixins import (\n CreateModelMixin,\n+ DestroyModelMixin,\n ListModelMixin,\n RetrieveModelMixin\n )\n@@ -18,7 +19,13 @@\n )\n \n \n-class InfractionViewSet(CreateModelMixin, RetrieveModelMixin, ListModelMixin, GenericViewSet):\n+class InfractionViewSet(\n+ CreateModelMixin,\n+ RetrieveModelMixin,\n+ ListModelMixin,\n+ GenericViewSet,\n+ DestroyModelMixin\n+):\n \"\"\"\n View providing CRUD operations on infractions for Discord users.\n \n@@ -108,6 +115,13 @@\n - 400: if a field in the request body is invalid or disallowed\n - 404: if an infraction with the given `id` could not be found\n \n+ ### DELETE /bot/infractions/<id:int>\n+ Delete the infraction with the given `id`.\n+\n+ #### Status codes\n+ - 204: returned on success\n+ - 404: if a infraction with the given `id` does not exist\n+\n ### Expanded routes\n All routes support expansion of `user` and `actor` in responses. To use an expanded route,\n append `/expanded` to the end of the route e.g. `GET /bot/infractions/expanded`.\n", "issue": "Support deleting infractions\nThe bot tries to delete infractions that fail to apply on the Discord side of things. This is cause it sends the infraction to our API before applying it on Discord. However, our API doesn't actually support the delete method so it returns a 405.\n", "before_files": [{"content": "from django.http.request import HttpRequest\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.filters import OrderingFilter, SearchFilter\nfrom rest_framework.mixins import (\n CreateModelMixin,\n ListModelMixin,\n RetrieveModelMixin\n)\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom pydis_site.apps.api.models.bot.infraction import Infraction\nfrom pydis_site.apps.api.serializers import (\n ExpandedInfractionSerializer,\n InfractionSerializer\n)\n\n\nclass InfractionViewSet(CreateModelMixin, RetrieveModelMixin, ListModelMixin, GenericViewSet):\n \"\"\"\n View providing CRUD operations on infractions for Discord users.\n\n ## Routes\n ### GET /bot/infractions\n Retrieve all infractions.\n May be filtered by the query parameters.\n\n #### Query parameters\n - **active** `bool`: whether the infraction is still active\n - **actor__id** `int`: snowflake of the user which applied the infraction\n - **hidden** `bool`: whether the infraction is a shadow infraction\n - **search** `str`: regular expression applied to the infraction's reason\n - **type** `str`: the type of the infraction\n - **user__id** `int`: snowflake of the user to which the infraction was applied\n - **ordering** `str`: comma-separated sequence of fields to order the returned results\n\n Invalid query parameters are ignored.\n\n #### Response format\n >>> [\n ... {\n ... 'id': 5,\n ... 'inserted_at': '2018-11-22T07:24:06.132307Z',\n ... 'expires_at': '5018-11-20T15:52:00Z',\n ... 'active': False,\n ... 'user': 172395097705414656,\n ... 'actor': 125435062127820800,\n ... 'type': 'ban',\n ... 'reason': 'He terk my jerb!',\n ... 'hidden': True\n ... }\n ... 
]\n\n #### Status codes\n - 200: returned on success\n\n ### GET /bot/infractions/<id:int>\n Retrieve a single infraction by ID.\n\n #### Response format\n See `GET /bot/infractions`.\n\n #### Status codes\n - 200: returned on success\n - 404: if an infraction with the given `id` could not be found\n\n ### POST /bot/infractions\n Create a new infraction and return the created infraction.\n Only `actor`, `type`, and `user` are required.\n The `actor` and `user` must be users known by the site.\n\n #### Request body\n >>> {\n ... 'active': False,\n ... 'actor': 125435062127820800,\n ... 'expires_at': '5018-11-20T15:52:00+00:00',\n ... 'hidden': True,\n ... 'type': 'ban',\n ... 'reason': 'He terk my jerb!',\n ... 'user': 172395097705414656\n ... }\n\n #### Response format\n See `GET /bot/infractions`.\n\n #### Status codes\n - 201: returned on success\n - 400: if a given user is unknown or a field in the request body is invalid\n\n ### PATCH /bot/infractions/<id:int>\n Update the infraction with the given `id` and return the updated infraction.\n Only `active`, `reason`, and `expires_at` may be updated.\n\n #### Request body\n >>> {\n ... 'active': True,\n ... 'expires_at': '4143-02-15T21:04:31+00:00',\n ... 'reason': 'durka derr'\n ... }\n\n #### Response format\n See `GET /bot/infractions`.\n\n #### Status codes\n - 200: returned on success\n - 400: if a field in the request body is invalid or disallowed\n - 404: if an infraction with the given `id` could not be found\n\n ### Expanded routes\n All routes support expansion of `user` and `actor` in responses. To use an expanded route,\n append `/expanded` to the end of the route e.g. `GET /bot/infractions/expanded`.\n\n #### Response format\n See `GET /bot/users/<snowflake:int>` for the expanded formats of `user` and `actor`. 
Responses\n are otherwise identical to their non-expanded counterparts.\n \"\"\"\n\n serializer_class = InfractionSerializer\n queryset = Infraction.objects.all()\n filter_backends = (DjangoFilterBackend, SearchFilter, OrderingFilter)\n filter_fields = ('user__id', 'actor__id', 'active', 'hidden', 'type')\n search_fields = ('$reason',)\n frozen_fields = ('id', 'inserted_at', 'type', 'user', 'actor', 'hidden')\n\n def partial_update(self, request: HttpRequest, *_args, **_kwargs) -> Response:\n \"\"\"Method that handles the nuts and bolts of updating an Infraction.\"\"\"\n for field in request.data:\n if field in self.frozen_fields:\n raise ValidationError({field: ['This field cannot be updated.']})\n\n instance = self.get_object()\n serializer = self.get_serializer(instance, data=request.data, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(serializer.data)\n\n @action(url_path='expanded', detail=False)\n def list_expanded(self, *args, **kwargs) -> Response:\n \"\"\"\n DRF method for listing Infraction entries.\n\n Called by the Django Rest Framework in response to the corresponding HTTP request.\n \"\"\"\n self.serializer_class = ExpandedInfractionSerializer\n return self.list(*args, **kwargs)\n\n @list_expanded.mapping.post\n def create_expanded(self, *args, **kwargs) -> Response:\n \"\"\"\n DRF method for creating an Infraction.\n\n Called by the Django Rest Framework in response to the corresponding HTTP request.\n \"\"\"\n self.serializer_class = ExpandedInfractionSerializer\n return self.create(*args, **kwargs)\n\n @action(url_path='expanded', url_name='detail-expanded', detail=True)\n def retrieve_expanded(self, *args, **kwargs) -> Response:\n \"\"\"\n DRF method for retrieving a specific Infraction.\n\n Called by the Django Rest Framework in response to the corresponding HTTP request.\n \"\"\"\n self.serializer_class = ExpandedInfractionSerializer\n return self.retrieve(*args, **kwargs)\n\n @retrieve_expanded.mapping.patch\n def partial_update_expanded(self, *args, **kwargs) -> Response:\n \"\"\"\n DRF method for updating an Infraction.\n\n Called by the Django Rest Framework in response to the corresponding HTTP request.\n \"\"\"\n self.serializer_class = ExpandedInfractionSerializer\n return self.partial_update(*args, **kwargs)\n", "path": "pydis_site/apps/api/viewsets/bot/infraction.py"}]}
num_tokens_prompt: 2,691
num_tokens_diff: 375
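A side note on the infraction record above: the merged diff fixes the 405 by mixing `DestroyModelMixin` into the viewset. The snippet below is a minimal sketch of that general Django REST Framework pattern, assuming it runs inside a configured DRF project; the viewset, queryset, and serializer names are placeholders rather than the project's real classes.

```python
from rest_framework.mixins import (
    CreateModelMixin,
    DestroyModelMixin,
    ListModelMixin,
    RetrieveModelMixin,
)
from rest_framework.viewsets import GenericViewSet


class ExampleViewSet(CreateModelMixin,
                     RetrieveModelMixin,
                     ListModelMixin,
                     DestroyModelMixin,
                     GenericViewSet):
    """Routes POST, GET (list/detail) and DELETE, but not PUT/PATCH.

    Without DestroyModelMixin the router has no `destroy` action to map,
    so DELETE requests fall through to a 405 Method Not Allowed.
    """

    # queryset = Example.objects.all()        # placeholder model
    # serializer_class = ExampleSerializer    # placeholder serializer
```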
problem_id: gh_patches_debug_8221
source: rasdani/github-patches
task_type: git_diff
in_source_id: cisagov__manage.get.gov-1094
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Check Domain availability via epp-Testing ### Issue Description When adding the /availability endpoint we will need to send a CheckDomain request to epp to see if the domain is available. This epp function is already implemented in domain.py and is called available(). It just needs to be tested and updated if the test show any problem with the implementation ### AC - [x] unit tests added for available - [x] manually test via sandbox with OT&E to be sure that this is working as expected - [x] update the implementation as needed or desired - [x] in your tests, ensure that this function can be called by just doing Domain.available() and not by having an instance of a domain ### Additional Context (optional) This must be tested by using Domain.available because the /availability endpoint (when implemented) will not have access to any particular domain object and this function needs to be able to be performed on its own. ### Issue Link blocks: #1015 </issue> <code> [start of src/epplibwrapper/__init__.py] 1 import logging 2 from types import SimpleNamespace 3 4 try: 5 from epplib import constants 6 except ImportError: 7 # allow epplibwrapper to load without epplib, for testing and development 8 pass 9 10 logger = logging.getLogger(__name__) 11 12 NAMESPACE = SimpleNamespace( 13 EPP="urn:ietf:params:xml:ns:epp-1.0", 14 XSI="http://www.w3.org/2001/XMLSchema-instance", 15 FRED="noop", 16 NIC_CONTACT="urn:ietf:params:xml:ns:contact-1.0", 17 NIC_DOMAIN="urn:ietf:params:xml:ns:domain-1.0", 18 NIC_ENUMVAL="noop", 19 NIC_EXTRA_ADDR="noop", 20 NIC_HOST="urn:ietf:params:xml:ns:host-1.0", 21 NIC_KEYSET="noop", 22 NIC_NSSET="noop", 23 ) 24 25 SCHEMA_LOCATION = SimpleNamespace( 26 XSI="urn:ietf:params:xml:ns:epp-1.0 epp-1.0.xsd", 27 FRED="noop fred-1.5.0.xsd", 28 NIC_CONTACT="urn:ietf:params:xml:ns:contact-1.0 contact-1.0.xsd", 29 NIC_DOMAIN="urn:ietf:params:xml:ns:domain-1.0 domain-1.0.xsd", 30 NIC_ENUMVAL="noop enumval-1.2.0.xsd", 31 NIC_EXTRA_ADDR="noop extra-addr-1.0.0.xsd", 32 NIC_HOST="urn:ietf:params:xml:ns:host-1.0 host-1.0.xsd", 33 NIC_KEYSET="noop keyset-1.3.2.xsd", 34 NIC_NSSET="noop nsset-1.2.2.xsd", 35 ) 36 37 try: 38 constants.NAMESPACE = NAMESPACE 39 constants.SCHEMA_LOCATION = SCHEMA_LOCATION 40 except NameError: 41 pass 42 43 # Attn: these imports should NOT be at the top of the file 44 try: 45 from .client import CLIENT, commands 46 from .errors import RegistryError, ErrorCode 47 from epplib.models import common 48 except ImportError: 49 pass 50 51 __all__ = [ 52 "CLIENT", 53 "commands", 54 "common", 55 "ErrorCode", 56 "RegistryError", 57 ] 58 [end of src/epplibwrapper/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/epplibwrapper/__init__.py b/src/epplibwrapper/__init__.py --- a/src/epplibwrapper/__init__.py +++ b/src/epplibwrapper/__init__.py @@ -45,6 +45,7 @@ from .client import CLIENT, commands from .errors import RegistryError, ErrorCode from epplib.models import common + from epplib import responses except ImportError: pass @@ -52,6 +53,7 @@ "CLIENT", "commands", "common", + "responses", "ErrorCode", "RegistryError", ]
{"golden_diff": "diff --git a/src/epplibwrapper/__init__.py b/src/epplibwrapper/__init__.py\n--- a/src/epplibwrapper/__init__.py\n+++ b/src/epplibwrapper/__init__.py\n@@ -45,6 +45,7 @@\n from .client import CLIENT, commands\n from .errors import RegistryError, ErrorCode\n from epplib.models import common\n+ from epplib import responses\n except ImportError:\n pass\n \n@@ -52,6 +53,7 @@\n \"CLIENT\",\n \"commands\",\n \"common\",\n+ \"responses\",\n \"ErrorCode\",\n \"RegistryError\",\n ]\n", "issue": "Check Domain availability via epp-Testing\n### Issue Description\r\n\r\nWhen adding the /availability endpoint we will need to send a CheckDomain request to epp to see if the domain is available. This epp function is already implemented in domain.py and is called available(). It just needs to be tested and updated if the test show any problem with the implementation\r\n\r\n### AC\r\n\r\n- [x] unit tests added for available\r\n- [x] manually test via sandbox with OT&E to be sure that this is working as expected \r\n- [x] update the implementation as needed or desired\r\n- [x] in your tests, ensure that this function can be called by just doing Domain.available() and not by having an instance of a domain\r\n\r\n### Additional Context (optional)\r\n\r\nThis must be tested by using Domain.available because the /availability endpoint (when implemented) will not have access to any particular domain object and this function needs to be able to be performed on its own.\r\n\r\n### Issue Link\r\nblocks: #1015 \n", "before_files": [{"content": "import logging\nfrom types import SimpleNamespace\n\ntry:\n from epplib import constants\nexcept ImportError:\n # allow epplibwrapper to load without epplib, for testing and development\n pass\n\nlogger = logging.getLogger(__name__)\n\nNAMESPACE = SimpleNamespace(\n EPP=\"urn:ietf:params:xml:ns:epp-1.0\",\n XSI=\"http://www.w3.org/2001/XMLSchema-instance\",\n FRED=\"noop\",\n NIC_CONTACT=\"urn:ietf:params:xml:ns:contact-1.0\",\n NIC_DOMAIN=\"urn:ietf:params:xml:ns:domain-1.0\",\n NIC_ENUMVAL=\"noop\",\n NIC_EXTRA_ADDR=\"noop\",\n NIC_HOST=\"urn:ietf:params:xml:ns:host-1.0\",\n NIC_KEYSET=\"noop\",\n NIC_NSSET=\"noop\",\n)\n\nSCHEMA_LOCATION = SimpleNamespace(\n XSI=\"urn:ietf:params:xml:ns:epp-1.0 epp-1.0.xsd\",\n FRED=\"noop fred-1.5.0.xsd\",\n NIC_CONTACT=\"urn:ietf:params:xml:ns:contact-1.0 contact-1.0.xsd\",\n NIC_DOMAIN=\"urn:ietf:params:xml:ns:domain-1.0 domain-1.0.xsd\",\n NIC_ENUMVAL=\"noop enumval-1.2.0.xsd\",\n NIC_EXTRA_ADDR=\"noop extra-addr-1.0.0.xsd\",\n NIC_HOST=\"urn:ietf:params:xml:ns:host-1.0 host-1.0.xsd\",\n NIC_KEYSET=\"noop keyset-1.3.2.xsd\",\n NIC_NSSET=\"noop nsset-1.2.2.xsd\",\n)\n\ntry:\n constants.NAMESPACE = NAMESPACE\n constants.SCHEMA_LOCATION = SCHEMA_LOCATION\nexcept NameError:\n pass\n\n# Attn: these imports should NOT be at the top of the file\ntry:\n from .client import CLIENT, commands\n from .errors import RegistryError, ErrorCode\n from epplib.models import common\nexcept ImportError:\n pass\n\n__all__ = [\n \"CLIENT\",\n \"commands\",\n \"common\",\n \"ErrorCode\",\n \"RegistryError\",\n]\n", "path": "src/epplibwrapper/__init__.py"}]}
num_tokens_prompt: 1,347
num_tokens_diff: 140
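A side note on the epplibwrapper record above: the file relies on guarded imports so the wrapper still loads when `epplib` is absent, and the diff extends both the guarded block and `__all__` with `responses`. The sketch below shows that import pattern in isolation; the fallback value and the helper function are illustrative additions, not part of the project.

```python
# Optional dependency kept behind try/except so that importing the wrapper
# never fails in test or development environments without epplib installed.
try:
    from epplib import responses  # real module when epplib is available
except ImportError:
    responses = None  # stand-in fallback used only in this sketch

__all__ = ["responses"]


def registry_client_usable() -> bool:
    """Illustrative helper: True only when the real EPP library imported."""
    return responses is not None


if __name__ == "__main__":
    print("epplib responses available:", registry_client_usable())
```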
problem_id: gh_patches_debug_13162
source: rasdani/github-patches
task_type: git_diff
in_source_id: chainer__chainer-2143
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Stop using ABC in Serializer AbstractSerializer is currently written as an abstract base class. I don't think it is needed to support ABC. </issue> <code> [start of chainer/serializer.py] 1 import abc 2 3 import six 4 5 6 @six.add_metaclass(abc.ABCMeta) 7 class AbstractSerializer(object): 8 9 """Abstract base class of all serializers and deserializers.""" 10 11 @abc.abstractmethod 12 def __getitem__(self, key): 13 """Gets a child serializer. 14 15 This operator creates a _child_ serializer represented by the given 16 key. 17 18 Args: 19 key (str): Name of the child serializer. 20 21 """ 22 raise NotImplementedError 23 24 @abc.abstractmethod 25 def __call__(self, key, value): 26 """Serializes or deserializes a value by given name. 27 28 This operator saves or loads a value by given name. 29 30 If this is a serializer, then the value is simply saved at the key. 31 Note that some type information might be missed depending on the 32 implementation (and the target file format). 33 34 If this is a deserializer, then the value is loaded by the key. The 35 deserialization differently works on scalars and arrays. For scalars, 36 the ``value`` argument is used just for determining the type of 37 restored value to be converted, and the converted value is returned. 38 For arrays, the restored elements are directly copied into the 39 ``value`` argument. String values are treated like scalars. If the 40 ``value`` argument is ``None``, the type of the restored value will 41 typically be a numpy array but can depend on the particular subclass 42 implementation. 43 44 Args: 45 key (str): Name of the serialization entry. 46 value (scalar, array, None, or str): Object to be (de)serialized. 47 ``None`` is only supported by deserializers. 48 49 Returns: 50 Serialized or deserialized value. 51 52 """ 53 raise NotImplementedError 54 55 56 class Serializer(AbstractSerializer): 57 58 """Base class of all serializers.""" 59 60 def save(self, obj): 61 """Saves an object by this serializer. 62 63 This is equivalent to ``obj.serialize(self)``. 64 65 Args: 66 obj: Target object to be serialized. 67 68 """ 69 obj.serialize(self) 70 71 72 class Deserializer(AbstractSerializer): 73 74 """Base class of all deserializers.""" 75 76 def load(self, obj): 77 """Loads an object from this deserializer. 78 79 This is equivalent to ``obj.serialize(self)``. 80 81 Args: 82 obj: Target object to be serialized. 83 84 """ 85 obj.serialize(self) 86 [end of chainer/serializer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/serializer.py b/chainer/serializer.py --- a/chainer/serializer.py +++ b/chainer/serializer.py @@ -1,14 +1,7 @@ -import abc - -import six - - -@six.add_metaclass(abc.ABCMeta) class AbstractSerializer(object): """Abstract base class of all serializers and deserializers.""" - @abc.abstractmethod def __getitem__(self, key): """Gets a child serializer. @@ -21,7 +14,6 @@ """ raise NotImplementedError - @abc.abstractmethod def __call__(self, key, value): """Serializes or deserializes a value by given name.
{"golden_diff": "diff --git a/chainer/serializer.py b/chainer/serializer.py\n--- a/chainer/serializer.py\n+++ b/chainer/serializer.py\n@@ -1,14 +1,7 @@\n-import abc\n-\n-import six\n-\n-\n-@six.add_metaclass(abc.ABCMeta)\n class AbstractSerializer(object):\n \n \"\"\"Abstract base class of all serializers and deserializers.\"\"\"\n \n- @abc.abstractmethod\n def __getitem__(self, key):\n \"\"\"Gets a child serializer.\n \n@@ -21,7 +14,6 @@\n \"\"\"\n raise NotImplementedError\n \n- @abc.abstractmethod\n def __call__(self, key, value):\n \"\"\"Serializes or deserializes a value by given name.\n", "issue": "Stop using ABC in Serializer\nAbstractSerializer is currently written as an abstract base class. I don't think it is needed to support ABC.\n", "before_files": [{"content": "import abc\n\nimport six\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass AbstractSerializer(object):\n\n \"\"\"Abstract base class of all serializers and deserializers.\"\"\"\n\n @abc.abstractmethod\n def __getitem__(self, key):\n \"\"\"Gets a child serializer.\n\n This operator creates a _child_ serializer represented by the given\n key.\n\n Args:\n key (str): Name of the child serializer.\n\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def __call__(self, key, value):\n \"\"\"Serializes or deserializes a value by given name.\n\n This operator saves or loads a value by given name.\n\n If this is a serializer, then the value is simply saved at the key.\n Note that some type information might be missed depending on the\n implementation (and the target file format).\n\n If this is a deserializer, then the value is loaded by the key. The\n deserialization differently works on scalars and arrays. For scalars,\n the ``value`` argument is used just for determining the type of\n restored value to be converted, and the converted value is returned.\n For arrays, the restored elements are directly copied into the\n ``value`` argument. String values are treated like scalars. If the\n ``value`` argument is ``None``, the type of the restored value will\n typically be a numpy array but can depend on the particular subclass\n implementation.\n\n Args:\n key (str): Name of the serialization entry.\n value (scalar, array, None, or str): Object to be (de)serialized.\n ``None`` is only supported by deserializers.\n\n Returns:\n Serialized or deserialized value.\n\n \"\"\"\n raise NotImplementedError\n\n\nclass Serializer(AbstractSerializer):\n\n \"\"\"Base class of all serializers.\"\"\"\n\n def save(self, obj):\n \"\"\"Saves an object by this serializer.\n\n This is equivalent to ``obj.serialize(self)``.\n\n Args:\n obj: Target object to be serialized.\n\n \"\"\"\n obj.serialize(self)\n\n\nclass Deserializer(AbstractSerializer):\n\n \"\"\"Base class of all deserializers.\"\"\"\n\n def load(self, obj):\n \"\"\"Loads an object from this deserializer.\n\n This is equivalent to ``obj.serialize(self)``.\n\n Args:\n obj: Target object to be serialized.\n\n \"\"\"\n obj.serialize(self)\n", "path": "chainer/serializer.py"}]}
num_tokens_prompt: 1,245
num_tokens_diff: 158
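A side note on the Chainer record above: the patch swaps the `abc.ABCMeta`/`abstractmethod` machinery for plain methods that raise `NotImplementedError`. The toy classes below, which are not Chainer's, show the practical difference: the ABC flavour rejects an incomplete subclass at instantiation time, while the plain flavour defers the failure to the first call.

```python
import abc


class AbcStyle(abc.ABC):
    @abc.abstractmethod
    def __call__(self, key, value):
        raise NotImplementedError


class PlainStyle:
    def __call__(self, key, value):
        raise NotImplementedError


class IncompleteAbc(AbcStyle):
    pass


class IncompletePlain(PlainStyle):
    pass


try:
    IncompleteAbc()          # fails here: abstract method not overridden
except TypeError as exc:
    print("ABC style:", exc)

obj = IncompletePlain()      # constructing succeeds...
try:
    obj("key", 1)            # ...the error only appears at call time
except NotImplementedError:
    print("Plain style: NotImplementedError raised at call time")
```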
problem_id: gh_patches_debug_29494
source: rasdani/github-patches
task_type: git_diff
in_source_id: ray-project__ray-4336
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> rllib rollout does not load the model automatically from params.json <!-- General questions should be asked on the mailing list ray-dev@googlegroups.com. Questions about how to use Ray should be asked on [StackOverflow](https://stackoverflow.com/questions/tagged/ray). Before submitting an issue, please fill out the following form. --> ### System information - **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux 4.4.0-135-generic x86_64 - **Python version**: Python 3.6.5 <!-- You can obtain the Ray version with python -c "import ray; print(ray.__version__)" --> ### Describe the problem <!-- Describe the problem clearly here. --> ### Source code / logs <!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. --> rllib rollout does not load the model automatically from params.json for a simple 256x256x256x256 model. When I run rllib rollout without specifying --config with "model": {"fcnet_hiddens": [256, 256, 256, 256]} it fails with the following error: ``` assert len(vector) == i, "Passed weight does not have the correct shape." AssertionError: Passed weight does not have the correct shape. ``` </issue> <code> [start of python/ray/rllib/rollout.py] 1 #!/usr/bin/env python 2 3 from __future__ import absolute_import 4 from __future__ import division 5 from __future__ import print_function 6 7 import argparse 8 import json 9 import os 10 import pickle 11 12 import gym 13 import ray 14 from ray.rllib.agents.registry import get_agent_class 15 16 EXAMPLE_USAGE = """ 17 Example Usage via RLlib CLI: 18 rllib rollout /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN 19 --env CartPole-v0 --steps 1000000 --out rollouts.pkl 20 21 Example Usage via executable: 22 ./rollout.py /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN 23 --env CartPole-v0 --steps 1000000 --out rollouts.pkl 24 """ 25 26 # Note: if you use any custom models or envs, register them here first, e.g.: 27 # 28 # ModelCatalog.register_custom_model("pa_model", ParametricActionsModel) 29 # register_env("pa_cartpole", lambda _: ParametricActionCartpole(10)) 30 31 32 def create_parser(parser_creator=None): 33 parser_creator = parser_creator or argparse.ArgumentParser 34 parser = parser_creator( 35 formatter_class=argparse.RawDescriptionHelpFormatter, 36 description="Roll out a reinforcement learning agent " 37 "given a checkpoint.", 38 epilog=EXAMPLE_USAGE) 39 40 parser.add_argument( 41 "checkpoint", type=str, help="Checkpoint from which to roll out.") 42 required_named = parser.add_argument_group("required named arguments") 43 required_named.add_argument( 44 "--run", 45 type=str, 46 required=True, 47 help="The algorithm or model to train. This may refer to the name " 48 "of a built-on algorithm (e.g. 
RLLib's DQN or PPO), or a " 49 "user-defined trainable function or class registered in the " 50 "tune registry.") 51 required_named.add_argument( 52 "--env", type=str, help="The gym environment to use.") 53 parser.add_argument( 54 "--no-render", 55 default=False, 56 action="store_const", 57 const=True, 58 help="Surpress rendering of the environment.") 59 parser.add_argument( 60 "--steps", default=10000, help="Number of steps to roll out.") 61 parser.add_argument("--out", default=None, help="Output filename.") 62 parser.add_argument( 63 "--config", 64 default="{}", 65 type=json.loads, 66 help="Algorithm-specific configuration (e.g. env, hyperparams). " 67 "Surpresses loading of configuration from checkpoint.") 68 return parser 69 70 71 def run(args, parser): 72 config = args.config 73 if not config: 74 # Load configuration from file 75 config_dir = os.path.dirname(args.checkpoint) 76 config_path = os.path.join(config_dir, "params.pkl") 77 if not os.path.exists(config_path): 78 config_path = os.path.join(config_dir, "../params.pkl") 79 if not os.path.exists(config_path): 80 raise ValueError( 81 "Could not find params.pkl in either the checkpoint dir or " 82 "its parent directory.") 83 with open(config_path, 'rb') as f: 84 config = pickle.load(f) 85 if "num_workers" in config: 86 config["num_workers"] = min(2, config["num_workers"]) 87 88 if not args.env: 89 if not config.get("env"): 90 parser.error("the following arguments are required: --env") 91 args.env = config.get("env") 92 93 ray.init() 94 95 cls = get_agent_class(args.run) 96 agent = cls(env=args.env, config=config) 97 agent.restore(args.checkpoint) 98 num_steps = int(args.steps) 99 rollout(agent, args.env, num_steps, args.out, args.no_render) 100 101 102 def rollout(agent, env_name, num_steps, out=None, no_render=True): 103 if hasattr(agent, "local_evaluator"): 104 env = agent.local_evaluator.env 105 multiagent = agent.local_evaluator.multiagent 106 if multiagent: 107 policy_agent_mapping = agent.config["multiagent"][ 108 "policy_mapping_fn"] 109 mapping_cache = {} 110 policy_map = agent.local_evaluator.policy_map 111 state_init = {p: m.get_initial_state() for p, m in policy_map.items()} 112 use_lstm = {p: len(s) > 0 for p, s in state_init.items()} 113 else: 114 env = gym.make(env_name) 115 multiagent = False 116 use_lstm = {'default': False} 117 118 if out is not None: 119 rollouts = [] 120 steps = 0 121 while steps < (num_steps or steps + 1): 122 if out is not None: 123 rollout = [] 124 state = env.reset() 125 done = False 126 reward_total = 0.0 127 while not done and steps < (num_steps or steps + 1): 128 if multiagent: 129 action_dict = {} 130 for agent_id in state.keys(): 131 a_state = state[agent_id] 132 if a_state is not None: 133 policy_id = mapping_cache.setdefault( 134 agent_id, policy_agent_mapping(agent_id)) 135 p_use_lstm = use_lstm[policy_id] 136 if p_use_lstm: 137 a_action, p_state_init, _ = agent.compute_action( 138 a_state, 139 state=state_init[policy_id], 140 policy_id=policy_id) 141 state_init[policy_id] = p_state_init 142 else: 143 a_action = agent.compute_action( 144 a_state, policy_id=policy_id) 145 action_dict[agent_id] = a_action 146 action = action_dict 147 else: 148 if use_lstm["default"]: 149 action, state_init, _ = agent.compute_action( 150 state, state=state_init) 151 else: 152 action = agent.compute_action(state) 153 154 next_state, reward, done, _ = env.step(action) 155 156 if multiagent: 157 done = done["__all__"] 158 reward_total += sum(reward.values()) 159 else: 160 reward_total += reward 161 if not 
no_render: 162 env.render() 163 if out is not None: 164 rollout.append([state, action, next_state, reward, done]) 165 steps += 1 166 state = next_state 167 if out is not None: 168 rollouts.append(rollout) 169 print("Episode reward", reward_total) 170 171 if out is not None: 172 pickle.dump(rollouts, open(out, "wb")) 173 174 175 if __name__ == "__main__": 176 parser = create_parser() 177 args = parser.parse_args() 178 run(args, parser) 179 [end of python/ray/rllib/rollout.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/python/ray/rllib/rollout.py b/python/ray/rllib/rollout.py --- a/python/ray/rllib/rollout.py +++ b/python/ray/rllib/rollout.py @@ -12,6 +12,7 @@ import gym import ray from ray.rllib.agents.registry import get_agent_class +from ray.tune.util import merge_dicts EXAMPLE_USAGE = """ Example Usage via RLlib CLI: @@ -69,22 +70,23 @@ def run(args, parser): - config = args.config - if not config: - # Load configuration from file - config_dir = os.path.dirname(args.checkpoint) - config_path = os.path.join(config_dir, "params.pkl") - if not os.path.exists(config_path): - config_path = os.path.join(config_dir, "../params.pkl") - if not os.path.exists(config_path): + config = {} + # Load configuration from file + config_dir = os.path.dirname(args.checkpoint) + config_path = os.path.join(config_dir, "params.pkl") + if not os.path.exists(config_path): + config_path = os.path.join(config_dir, "../params.pkl") + if not os.path.exists(config_path): + if not args.config: raise ValueError( "Could not find params.pkl in either the checkpoint dir or " "its parent directory.") + else: with open(config_path, 'rb') as f: config = pickle.load(f) - if "num_workers" in config: - config["num_workers"] = min(2, config["num_workers"]) - + if "num_workers" in config: + config["num_workers"] = min(2, config["num_workers"]) + config = merge_dicts(config, args.config) if not args.env: if not config.get("env"): parser.error("the following arguments are required: --env")
{"golden_diff": "diff --git a/python/ray/rllib/rollout.py b/python/ray/rllib/rollout.py\n--- a/python/ray/rllib/rollout.py\n+++ b/python/ray/rllib/rollout.py\n@@ -12,6 +12,7 @@\n import gym\n import ray\n from ray.rllib.agents.registry import get_agent_class\n+from ray.tune.util import merge_dicts\n \n EXAMPLE_USAGE = \"\"\"\n Example Usage via RLlib CLI:\n@@ -69,22 +70,23 @@\n \n \n def run(args, parser):\n- config = args.config\n- if not config:\n- # Load configuration from file\n- config_dir = os.path.dirname(args.checkpoint)\n- config_path = os.path.join(config_dir, \"params.pkl\")\n- if not os.path.exists(config_path):\n- config_path = os.path.join(config_dir, \"../params.pkl\")\n- if not os.path.exists(config_path):\n+ config = {}\n+ # Load configuration from file\n+ config_dir = os.path.dirname(args.checkpoint)\n+ config_path = os.path.join(config_dir, \"params.pkl\")\n+ if not os.path.exists(config_path):\n+ config_path = os.path.join(config_dir, \"../params.pkl\")\n+ if not os.path.exists(config_path):\n+ if not args.config:\n raise ValueError(\n \"Could not find params.pkl in either the checkpoint dir or \"\n \"its parent directory.\")\n+ else:\n with open(config_path, 'rb') as f:\n config = pickle.load(f)\n- if \"num_workers\" in config:\n- config[\"num_workers\"] = min(2, config[\"num_workers\"])\n-\n+ if \"num_workers\" in config:\n+ config[\"num_workers\"] = min(2, config[\"num_workers\"])\n+ config = merge_dicts(config, args.config)\n if not args.env:\n if not config.get(\"env\"):\n parser.error(\"the following arguments are required: --env\")\n", "issue": "rllib rollout does not load the model automatically from params.json\n<!--\r\nGeneral questions should be asked on the mailing list ray-dev@googlegroups.com.\r\nQuestions about how to use Ray should be asked on\r\n[StackOverflow](https://stackoverflow.com/questions/tagged/ray).\r\n\r\nBefore submitting an issue, please fill out the following form.\r\n-->\r\n\r\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux 4.4.0-135-generic x86_64 \r\n- **Python version**: Python 3.6.5 \r\n\r\n<!--\r\nYou can obtain the Ray version with\r\n\r\npython -c \"import ray; print(ray.__version__)\"\r\n-->\r\n\r\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\n\r\n### Source code / logs\r\n<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->\r\nrllib rollout does not load the model automatically from params.json for a simple 256x256x256x256 model. 
\r\nWhen I run rllib rollout without specifying --config with \"model\": {\"fcnet_hiddens\": [256, 256, 256, 256]} it fails with the following error:\r\n```\r\n assert len(vector) == i, \"Passed weight does not have the correct shape.\"\r\nAssertionError: Passed weight does not have the correct shape.\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport json\nimport os\nimport pickle\n\nimport gym\nimport ray\nfrom ray.rllib.agents.registry import get_agent_class\n\nEXAMPLE_USAGE = \"\"\"\nExample Usage via RLlib CLI:\n rllib rollout /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN\n --env CartPole-v0 --steps 1000000 --out rollouts.pkl\n\nExample Usage via executable:\n ./rollout.py /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN\n --env CartPole-v0 --steps 1000000 --out rollouts.pkl\n\"\"\"\n\n# Note: if you use any custom models or envs, register them here first, e.g.:\n#\n# ModelCatalog.register_custom_model(\"pa_model\", ParametricActionsModel)\n# register_env(\"pa_cartpole\", lambda _: ParametricActionCartpole(10))\n\n\ndef create_parser(parser_creator=None):\n parser_creator = parser_creator or argparse.ArgumentParser\n parser = parser_creator(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"Roll out a reinforcement learning agent \"\n \"given a checkpoint.\",\n epilog=EXAMPLE_USAGE)\n\n parser.add_argument(\n \"checkpoint\", type=str, help=\"Checkpoint from which to roll out.\")\n required_named = parser.add_argument_group(\"required named arguments\")\n required_named.add_argument(\n \"--run\",\n type=str,\n required=True,\n help=\"The algorithm or model to train. This may refer to the name \"\n \"of a built-on algorithm (e.g. RLLib's DQN or PPO), or a \"\n \"user-defined trainable function or class registered in the \"\n \"tune registry.\")\n required_named.add_argument(\n \"--env\", type=str, help=\"The gym environment to use.\")\n parser.add_argument(\n \"--no-render\",\n default=False,\n action=\"store_const\",\n const=True,\n help=\"Surpress rendering of the environment.\")\n parser.add_argument(\n \"--steps\", default=10000, help=\"Number of steps to roll out.\")\n parser.add_argument(\"--out\", default=None, help=\"Output filename.\")\n parser.add_argument(\n \"--config\",\n default=\"{}\",\n type=json.loads,\n help=\"Algorithm-specific configuration (e.g. env, hyperparams). 
\"\n \"Surpresses loading of configuration from checkpoint.\")\n return parser\n\n\ndef run(args, parser):\n config = args.config\n if not config:\n # Load configuration from file\n config_dir = os.path.dirname(args.checkpoint)\n config_path = os.path.join(config_dir, \"params.pkl\")\n if not os.path.exists(config_path):\n config_path = os.path.join(config_dir, \"../params.pkl\")\n if not os.path.exists(config_path):\n raise ValueError(\n \"Could not find params.pkl in either the checkpoint dir or \"\n \"its parent directory.\")\n with open(config_path, 'rb') as f:\n config = pickle.load(f)\n if \"num_workers\" in config:\n config[\"num_workers\"] = min(2, config[\"num_workers\"])\n\n if not args.env:\n if not config.get(\"env\"):\n parser.error(\"the following arguments are required: --env\")\n args.env = config.get(\"env\")\n\n ray.init()\n\n cls = get_agent_class(args.run)\n agent = cls(env=args.env, config=config)\n agent.restore(args.checkpoint)\n num_steps = int(args.steps)\n rollout(agent, args.env, num_steps, args.out, args.no_render)\n\n\ndef rollout(agent, env_name, num_steps, out=None, no_render=True):\n if hasattr(agent, \"local_evaluator\"):\n env = agent.local_evaluator.env\n multiagent = agent.local_evaluator.multiagent\n if multiagent:\n policy_agent_mapping = agent.config[\"multiagent\"][\n \"policy_mapping_fn\"]\n mapping_cache = {}\n policy_map = agent.local_evaluator.policy_map\n state_init = {p: m.get_initial_state() for p, m in policy_map.items()}\n use_lstm = {p: len(s) > 0 for p, s in state_init.items()}\n else:\n env = gym.make(env_name)\n multiagent = False\n use_lstm = {'default': False}\n\n if out is not None:\n rollouts = []\n steps = 0\n while steps < (num_steps or steps + 1):\n if out is not None:\n rollout = []\n state = env.reset()\n done = False\n reward_total = 0.0\n while not done and steps < (num_steps or steps + 1):\n if multiagent:\n action_dict = {}\n for agent_id in state.keys():\n a_state = state[agent_id]\n if a_state is not None:\n policy_id = mapping_cache.setdefault(\n agent_id, policy_agent_mapping(agent_id))\n p_use_lstm = use_lstm[policy_id]\n if p_use_lstm:\n a_action, p_state_init, _ = agent.compute_action(\n a_state,\n state=state_init[policy_id],\n policy_id=policy_id)\n state_init[policy_id] = p_state_init\n else:\n a_action = agent.compute_action(\n a_state, policy_id=policy_id)\n action_dict[agent_id] = a_action\n action = action_dict\n else:\n if use_lstm[\"default\"]:\n action, state_init, _ = agent.compute_action(\n state, state=state_init)\n else:\n action = agent.compute_action(state)\n\n next_state, reward, done, _ = env.step(action)\n\n if multiagent:\n done = done[\"__all__\"]\n reward_total += sum(reward.values())\n else:\n reward_total += reward\n if not no_render:\n env.render()\n if out is not None:\n rollout.append([state, action, next_state, reward, done])\n steps += 1\n state = next_state\n if out is not None:\n rollouts.append(rollout)\n print(\"Episode reward\", reward_total)\n\n if out is not None:\n pickle.dump(rollouts, open(out, \"wb\"))\n\n\nif __name__ == \"__main__\":\n parser = create_parser()\n args = parser.parse_args()\n run(args, parser)\n", "path": "python/ray/rllib/rollout.py"}]}
2,704
425
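The rollout row above (gh_patches_debug_9091) fails because `run()` in `rollout.py` only loads the checkpoint's `params.pkl` when no `--config` is given, so any CLI config silently drops the trained model architecture and the restored weights no longer match. The sketch below is one hypothetical way to merge the two sources of configuration; the function name and return shape are invented for illustration and are not the repository's actual fix.

```python
import os
import pickle


def load_merged_config(checkpoint: str, cli_config: dict) -> dict:
    """Load the config saved next to the checkpoint and overlay CLI options.

    Keeping the saved "model" settings (e.g. fcnet_hiddens) avoids the
    "Passed weight does not have the correct shape" assertion on restore.
    """
    config_dir = os.path.dirname(checkpoint)
    config_path = os.path.join(config_dir, "params.pkl")
    if not os.path.exists(config_path):
        config_path = os.path.join(config_dir, "../params.pkl")

    saved_config: dict = {}
    if os.path.exists(config_path):
        with open(config_path, "rb") as f:
            saved_config = pickle.load(f)

    merged = dict(saved_config)
    merged.update(cli_config or {})  # explicit CLI keys win, everything else survives
    return merged
```

With a merge like this, passing `--config '{"num_workers": 1}'` would override only that key while the checkpoint's model section is still used to rebuild the network.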
gh_patches_debug_9091
rasdani/github-patches
git_diff
pytorch__ignite-320
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ParamScheduler docs missing No docs on `ParamScheduler` and related classes on the [site](https://pytorch.org/ignite/contrib/handlers.html). </issue> <code> [start of ignite/contrib/handlers/__init__.py] 1 2 from ignite.contrib.handlers.param_scheduler import ParamScheduler, CyclicalScheduler, \ 3 LinearCyclicalScheduler, CosineAnnealingScheduler 4 5 from ignite.contrib.handlers.tqdm_logger import ProgressBar 6 7 __all__ = ['ProgressBar'] 8 [end of ignite/contrib/handlers/__init__.py] [start of ignite/contrib/engines/__init__.py] 1 # coding: utf-8 2 3 from ignite.contrib.engines.tbptt import create_supervised_tbptt_trainer 4 from ignite.contrib.engines.tbptt import Tbptt_Events 5 6 7 __all__ = ["create_supervised_tbptt_trainer", "Tbptt_Events"] 8 [end of ignite/contrib/engines/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ignite/contrib/engines/__init__.py b/ignite/contrib/engines/__init__.py --- a/ignite/contrib/engines/__init__.py +++ b/ignite/contrib/engines/__init__.py @@ -2,6 +2,3 @@ from ignite.contrib.engines.tbptt import create_supervised_tbptt_trainer from ignite.contrib.engines.tbptt import Tbptt_Events - - -__all__ = ["create_supervised_tbptt_trainer", "Tbptt_Events"] diff --git a/ignite/contrib/handlers/__init__.py b/ignite/contrib/handlers/__init__.py --- a/ignite/contrib/handlers/__init__.py +++ b/ignite/contrib/handlers/__init__.py @@ -3,5 +3,3 @@ LinearCyclicalScheduler, CosineAnnealingScheduler from ignite.contrib.handlers.tqdm_logger import ProgressBar - -__all__ = ['ProgressBar']
{"golden_diff": "diff --git a/ignite/contrib/engines/__init__.py b/ignite/contrib/engines/__init__.py\n--- a/ignite/contrib/engines/__init__.py\n+++ b/ignite/contrib/engines/__init__.py\n@@ -2,6 +2,3 @@\n \n from ignite.contrib.engines.tbptt import create_supervised_tbptt_trainer\n from ignite.contrib.engines.tbptt import Tbptt_Events\n-\n-\n-__all__ = [\"create_supervised_tbptt_trainer\", \"Tbptt_Events\"]\ndiff --git a/ignite/contrib/handlers/__init__.py b/ignite/contrib/handlers/__init__.py\n--- a/ignite/contrib/handlers/__init__.py\n+++ b/ignite/contrib/handlers/__init__.py\n@@ -3,5 +3,3 @@\n LinearCyclicalScheduler, CosineAnnealingScheduler\n \n from ignite.contrib.handlers.tqdm_logger import ProgressBar\n-\n-__all__ = ['ProgressBar']\n", "issue": "ParamScheduler docs missing\nNo docs on `ParamScheduler` and related classes on the [site](https://pytorch.org/ignite/contrib/handlers.html).\n", "before_files": [{"content": "\nfrom ignite.contrib.handlers.param_scheduler import ParamScheduler, CyclicalScheduler, \\\n LinearCyclicalScheduler, CosineAnnealingScheduler\n\nfrom ignite.contrib.handlers.tqdm_logger import ProgressBar\n\n__all__ = ['ProgressBar']\n", "path": "ignite/contrib/handlers/__init__.py"}, {"content": "# coding: utf-8\n\nfrom ignite.contrib.engines.tbptt import create_supervised_tbptt_trainer\nfrom ignite.contrib.engines.tbptt import Tbptt_Events\n\n\n__all__ = [\"create_supervised_tbptt_trainer\", \"Tbptt_Events\"]\n", "path": "ignite/contrib/engines/__init__.py"}]}
736
224
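The accepted fix for the ignite row above simply deletes `__all__` from the two `__init__.py` files, presumably because Sphinx autodoc's `:members:` honours `__all__` when it is defined and therefore only `ProgressBar` was being documented. A hypothetical alternative, shown only as a sketch and not the merged change, is to keep `__all__` but enumerate every public handler so wildcard imports and the generated API pages stay in sync:

```python
# ignite/contrib/handlers/__init__.py -- hypothetical alternative to removing __all__
from ignite.contrib.handlers.param_scheduler import (
    ParamScheduler,
    CyclicalScheduler,
    LinearCyclicalScheduler,
    CosineAnnealingScheduler,
)
from ignite.contrib.handlers.tqdm_logger import ProgressBar

__all__ = [
    "ParamScheduler",
    "CyclicalScheduler",
    "LinearCyclicalScheduler",
    "CosineAnnealingScheduler",
    "ProgressBar",
]
```

Either way, the docs sources still need an autodoc directive covering the scheduler classes for the pages to actually render.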
gh_patches_debug_5758
rasdani/github-patches
git_diff
fossasia__open-event-server-2489
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Propose attendees/ticketing API With the orga app and the implementation of API endpoints in this PR https://github.com/fossasia/open-event-orga-server/pull/2379 we have the first steps to an attendee API. In how far would that overlap with a ticketing API? What is the best way to implement this and keep it generic? Do we need two APIs - Attendees and Ticketing or would that be handled in one API? Also related to https://github.com/fossasia/open-event-orga-server/issues/904 </issue> <code> [start of app/api/tickets.py] 1 from flask.ext.restplus import Namespace 2 3 from app.helpers.ticketing import TicketingManager 4 5 from .helpers.helpers import ( 6 requires_auth, 7 can_access) 8 from .helpers.utils import POST_RESPONSES 9 from .helpers.utils import Resource 10 from .helpers import custom_fields as fields 11 from ..helpers.data_getter import DataGetter 12 13 api = Namespace('tickets', description='Tickets', path='/') 14 15 ORDER = api.model('Order', { 16 'id': fields.Integer(), 17 'identifier': fields.String(), 18 'amount': fields.Float(), 19 'paid_via': fields.String(), 20 'invoice_number': fields.String(), 21 'payment_mode': fields.String(), 22 'status': fields.String(), 23 'completed_at': fields.DateTime(), 24 }) 25 26 TICKET = api.model('Ticket', { 27 'id': fields.Integer(), 28 'name': fields.String(), 29 'description': fields.String(), 30 'type': fields.String(), 31 'price': fields.Float(), 32 'quantity': fields.Integer(), 33 }) 34 35 36 @api.route('/events/<int:event_id>/tickets/') 37 class TicketsList(Resource): 38 @requires_auth 39 @api.doc('tickets', responses=POST_RESPONSES) 40 @api.marshal_list_with(TICKET) 41 def get(self, event_id): 42 """Get tickets of the event""" 43 return DataGetter.get_sales_open_tickets(event_id=event_id).all() 44 45 46 @api.route('/events/<int:event_id>/tickets/<int:ticket_id>') 47 class Ticket(Resource): 48 @requires_auth 49 @api.doc('ticket', responses=POST_RESPONSES) 50 @api.marshal_with(TICKET) 51 def get(self, event_id, ticket_id): 52 """Get information about a ticket""" 53 return TicketingManager.get_ticket(ticket_id=ticket_id) 54 55 56 57 [end of app/api/tickets.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/api/tickets.py b/app/api/tickets.py --- a/app/api/tickets.py +++ b/app/api/tickets.py @@ -52,5 +52,13 @@ """Get information about a ticket""" return TicketingManager.get_ticket(ticket_id=ticket_id) +@api.route('/events/<int:event_id>/orders/<string:identifier>') +class Order(Resource): + @requires_auth + @api.doc('order', responses=POST_RESPONSES) + @api.marshal_with(ORDER) + def get(self, event_id, identifier): + """Get information about a ticket""" + return TicketingManager.get_order_by_identifier(identifier=identifier)
{"golden_diff": "diff --git a/app/api/tickets.py b/app/api/tickets.py\n--- a/app/api/tickets.py\n+++ b/app/api/tickets.py\n@@ -52,5 +52,13 @@\n \"\"\"Get information about a ticket\"\"\"\n return TicketingManager.get_ticket(ticket_id=ticket_id)\n \n+@api.route('/events/<int:event_id>/orders/<string:identifier>')\n+class Order(Resource):\n+ @requires_auth\n+ @api.doc('order', responses=POST_RESPONSES)\n+ @api.marshal_with(ORDER)\n+ def get(self, event_id, identifier):\n+ \"\"\"Get information about a ticket\"\"\"\n+ return TicketingManager.get_order_by_identifier(identifier=identifier)\n", "issue": "Propose attendees/ticketing API\nWith the orga app and the implementation of API endpoints in this PR https://github.com/fossasia/open-event-orga-server/pull/2379 we have the first steps to an attendee API. In how far would that overlap with a ticketing API?\n\nWhat is the best way to implement this and keep it generic? Do we need two APIs - Attendees and Ticketing or would that be handled in one API?\n\nAlso related to https://github.com/fossasia/open-event-orga-server/issues/904\n\n", "before_files": [{"content": "from flask.ext.restplus import Namespace\n\nfrom app.helpers.ticketing import TicketingManager\n\nfrom .helpers.helpers import (\n requires_auth,\n can_access)\nfrom .helpers.utils import POST_RESPONSES\nfrom .helpers.utils import Resource\nfrom .helpers import custom_fields as fields\nfrom ..helpers.data_getter import DataGetter\n\napi = Namespace('tickets', description='Tickets', path='/')\n\nORDER = api.model('Order', {\n 'id': fields.Integer(),\n 'identifier': fields.String(),\n 'amount': fields.Float(),\n 'paid_via': fields.String(),\n 'invoice_number': fields.String(),\n 'payment_mode': fields.String(),\n 'status': fields.String(),\n 'completed_at': fields.DateTime(),\n})\n\nTICKET = api.model('Ticket', {\n 'id': fields.Integer(),\n 'name': fields.String(),\n 'description': fields.String(),\n 'type': fields.String(),\n 'price': fields.Float(),\n 'quantity': fields.Integer(),\n})\n\n\n@api.route('/events/<int:event_id>/tickets/')\nclass TicketsList(Resource):\n @requires_auth\n @api.doc('tickets', responses=POST_RESPONSES)\n @api.marshal_list_with(TICKET)\n def get(self, event_id):\n \"\"\"Get tickets of the event\"\"\"\n return DataGetter.get_sales_open_tickets(event_id=event_id).all()\n\n\n@api.route('/events/<int:event_id>/tickets/<int:ticket_id>')\nclass Ticket(Resource):\n @requires_auth\n @api.doc('ticket', responses=POST_RESPONSES)\n @api.marshal_with(TICKET)\n def get(self, event_id, ticket_id):\n \"\"\"Get information about a ticket\"\"\"\n return TicketingManager.get_ticket(ticket_id=ticket_id)\n\n\n\n", "path": "app/api/tickets.py"}]}
1,128
153
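The diff for the open-event-server row above adds a `GET /events/<event_id>/orders/<identifier>` resource marshalled with the `ORDER` model. A hedged client-side sketch using `requests`; the base URL, event id, order identifier and credentials are placeholders, and the exact auth scheme depends on how `requires_auth` is configured in a given deployment:

```python
import requests

BASE_URL = "http://localhost:5000"   # placeholder deployment URL
EVENT_ID = 1                         # placeholder event id
ORDER_IDENTIFIER = "abc123"          # placeholder order identifier

resp = requests.get(
    f"{BASE_URL}/events/{EVENT_ID}/orders/{ORDER_IDENTIFIER}",
    headers={"Authorization": "Basic <credentials>"},  # requires_auth expects valid credentials
    timeout=10,
)
resp.raise_for_status()
order = resp.json()
print(order["status"], order["amount"], order["completed_at"])
```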
gh_patches_debug_17306
rasdani/github-patches
git_diff
cal-itp__benefits-2116
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Refactor claims handling for integer claims During the OAuth `authorize` flow, we look for [boolean claim values](https://github.com/cal-itp/benefits/blob/dev/benefits/oauth/views.py#L75) to determine if the user is eligible. IdG is changing their claims implementation to cut down on the size of the token being sent to Benefits. Instead of booleans, they will use integers to indicate claim values: * `0` will indicate `False` (i.e. the claim indicates eligibility failed) * `1` will indicate `True` (i.e. the claim indicates eligibility succeeded) * Any other integer `>= 10` will indicate an error code **Note:** the claim values are transmitted in the token as `str`, and should be parsed to `int` before usage. ## Acceptance Criteria <!-- Remember to consider edge cases --> - [ ] `authorize` processes integer claims as described above ## Additional context While we work to implement this change, existing flows for Older Adults and Veterans will use both claim styles. New flows for CalFresh and the new Veterans API will ~only use the newer integer claim style, so this refactor is necessary for supporting those flows.~ also support both styles to allow us time to implement and cut over. There are an entirely new set of scopes created for the integer-based claims so as not to interfere with the existing implementation. Once we have this change tested and deployed, IdG will cutover all flows to use the integer style only. Mapping error codes to error messages and analytics will be handled in #2049. See [this Slack thread](https://cal-itp.slack.com/archives/C037Y3UE71P/p1714434750536319) from @johnatstate for more context. </issue> <code> [start of benefits/oauth/views.py] 1 import logging 2 3 from django.shortcuts import redirect 4 from django.urls import reverse 5 from django.utils.decorators import decorator_from_middleware 6 7 from benefits.core import session 8 from . 
import analytics, redirects 9 from .client import oauth 10 from .middleware import VerifierUsesAuthVerificationSessionRequired 11 12 13 logger = logging.getLogger(__name__) 14 15 16 ROUTE_AUTH = "oauth:authorize" 17 ROUTE_START = "eligibility:start" 18 ROUTE_CONFIRM = "eligibility:confirm" 19 ROUTE_UNVERIFIED = "eligibility:unverified" 20 ROUTE_POST_LOGOUT = "oauth:post_logout" 21 22 23 @decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired) 24 def login(request): 25 """View implementing OIDC authorize_redirect.""" 26 verifier = session.verifier(request) 27 oauth_client = oauth.create_client(verifier.auth_provider.client_name) 28 29 if not oauth_client: 30 raise Exception(f"oauth_client not registered: {verifier.auth_provider.client_name}") 31 32 route = reverse(ROUTE_AUTH) 33 redirect_uri = redirects.generate_redirect_uri(request, route) 34 35 logger.debug(f"OAuth authorize_redirect with redirect_uri: {redirect_uri}") 36 37 analytics.started_sign_in(request) 38 39 return oauth_client.authorize_redirect(request, redirect_uri) 40 41 42 @decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired) 43 def authorize(request): 44 """View implementing OIDC token authorization.""" 45 verifier = session.verifier(request) 46 oauth_client = oauth.create_client(verifier.auth_provider.client_name) 47 48 if not oauth_client: 49 raise Exception(f"oauth_client not registered: {verifier.auth_provider.client_name}") 50 51 logger.debug("Attempting to authorize OAuth access token") 52 token = oauth_client.authorize_access_token(request) 53 54 if token is None: 55 logger.warning("Could not authorize OAuth access token") 56 return redirect(ROUTE_START) 57 58 logger.debug("OAuth access token authorized") 59 60 # We store the id_token in the user's session. This is the minimal amount of information needed later to log the user out. 61 id_token = token["id_token"] 62 63 # We store the returned claim in case it can be used later in eligibility verification. 
64 verifier_claim = verifier.auth_provider.claim 65 stored_claim = None 66 67 if verifier_claim: 68 userinfo = token.get("userinfo") 69 70 if userinfo: 71 claim_value = userinfo.get(verifier_claim) 72 # the claim comes back in userinfo like { "claim": "True" | "False" } 73 if claim_value is None: 74 logger.warning(f"userinfo did not contain: {verifier_claim}") 75 elif claim_value.lower() == "true": 76 # if userinfo contains our claim and the flag is true, store the *claim* 77 stored_claim = verifier_claim 78 79 session.update(request, oauth_token=id_token, oauth_claim=stored_claim) 80 81 analytics.finished_sign_in(request) 82 83 return redirect(ROUTE_CONFIRM) 84 85 86 @decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired) 87 def cancel(request): 88 """View implementing cancellation of OIDC authorization.""" 89 90 analytics.canceled_sign_in(request) 91 92 return redirect(ROUTE_UNVERIFIED) 93 94 95 @decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired) 96 def logout(request): 97 """View implementing OIDC and application sign out.""" 98 verifier = session.verifier(request) 99 oauth_client = oauth.create_client(verifier.auth_provider.client_name) 100 101 if not oauth_client: 102 raise Exception(f"oauth_client not registered: {verifier.auth_provider.client_name}") 103 104 analytics.started_sign_out(request) 105 106 # overwrite the oauth session token, the user is signed out of the app 107 token = session.oauth_token(request) 108 session.logout(request) 109 110 route = reverse(ROUTE_POST_LOGOUT) 111 redirect_uri = redirects.generate_redirect_uri(request, route) 112 113 logger.debug(f"OAuth end_session_endpoint with redirect_uri: {redirect_uri}") 114 115 # send the user through the end_session_endpoint, redirecting back to 116 # the post_logout route 117 return redirects.deauthorize_redirect(oauth_client, token, redirect_uri) 118 119 120 @decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired) 121 def post_logout(request): 122 """View routes the user to their origin after sign out.""" 123 124 analytics.finished_sign_out(request) 125 126 origin = session.origin(request) 127 return redirect(origin) 128 [end of benefits/oauth/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/benefits/oauth/views.py b/benefits/oauth/views.py --- a/benefits/oauth/views.py +++ b/benefits/oauth/views.py @@ -69,11 +69,12 @@ if userinfo: claim_value = userinfo.get(verifier_claim) - # the claim comes back in userinfo like { "claim": "True" | "False" } + # the claim comes back in userinfo like { "claim": "1" | "0" } + claim_value = int(claim_value) if claim_value else None if claim_value is None: logger.warning(f"userinfo did not contain: {verifier_claim}") - elif claim_value.lower() == "true": - # if userinfo contains our claim and the flag is true, store the *claim* + elif claim_value == 1: + # if userinfo contains our claim and the flag is 1 (true), store the *claim* stored_claim = verifier_claim session.update(request, oauth_token=id_token, oauth_claim=stored_claim)
{"golden_diff": "diff --git a/benefits/oauth/views.py b/benefits/oauth/views.py\n--- a/benefits/oauth/views.py\n+++ b/benefits/oauth/views.py\n@@ -69,11 +69,12 @@\n \n if userinfo:\n claim_value = userinfo.get(verifier_claim)\n- # the claim comes back in userinfo like { \"claim\": \"True\" | \"False\" }\n+ # the claim comes back in userinfo like { \"claim\": \"1\" | \"0\" }\n+ claim_value = int(claim_value) if claim_value else None\n if claim_value is None:\n logger.warning(f\"userinfo did not contain: {verifier_claim}\")\n- elif claim_value.lower() == \"true\":\n- # if userinfo contains our claim and the flag is true, store the *claim*\n+ elif claim_value == 1:\n+ # if userinfo contains our claim and the flag is 1 (true), store the *claim*\n stored_claim = verifier_claim\n \n session.update(request, oauth_token=id_token, oauth_claim=stored_claim)\n", "issue": "Refactor claims handling for integer claims\nDuring the OAuth `authorize` flow, we look for [boolean claim values](https://github.com/cal-itp/benefits/blob/dev/benefits/oauth/views.py#L75) to determine if the user is eligible.\n\nIdG is changing their claims implementation to cut down on the size of the token being sent to Benefits. Instead of booleans, they will use integers to indicate claim values:\n\n* `0` will indicate `False` (i.e. the claim indicates eligibility failed)\n* `1` will indicate `True` (i.e. the claim indicates eligibility succeeded)\n* Any other integer `>= 10` will indicate an error code\n\n**Note:** the claim values are transmitted in the token as `str`, and should be parsed to `int` before usage.\n\n## Acceptance Criteria\n\n<!-- Remember to consider edge cases -->\n\n- [ ] `authorize` processes integer claims as described above\n\n## Additional context\n\nWhile we work to implement this change, existing flows for Older Adults and Veterans will use both claim styles. New flows for CalFresh and the new Veterans API will ~only use the newer integer claim style, so this refactor is necessary for supporting those flows.~ also support both styles to allow us time to implement and cut over. There are an entirely new set of scopes created for the integer-based claims so as not to interfere with the existing implementation.\n\nOnce we have this change tested and deployed, IdG will cutover all flows to use the integer style only.\n\nMapping error codes to error messages and analytics will be handled in #2049.\n\nSee [this Slack thread](https://cal-itp.slack.com/archives/C037Y3UE71P/p1714434750536319) from @johnatstate for more context.\n", "before_files": [{"content": "import logging\n\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\n\nfrom benefits.core import session\nfrom . 
import analytics, redirects\nfrom .client import oauth\nfrom .middleware import VerifierUsesAuthVerificationSessionRequired\n\n\nlogger = logging.getLogger(__name__)\n\n\nROUTE_AUTH = \"oauth:authorize\"\nROUTE_START = \"eligibility:start\"\nROUTE_CONFIRM = \"eligibility:confirm\"\nROUTE_UNVERIFIED = \"eligibility:unverified\"\nROUTE_POST_LOGOUT = \"oauth:post_logout\"\n\n\n@decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)\ndef login(request):\n \"\"\"View implementing OIDC authorize_redirect.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n route = reverse(ROUTE_AUTH)\n redirect_uri = redirects.generate_redirect_uri(request, route)\n\n logger.debug(f\"OAuth authorize_redirect with redirect_uri: {redirect_uri}\")\n\n analytics.started_sign_in(request)\n\n return oauth_client.authorize_redirect(request, redirect_uri)\n\n\n@decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)\ndef authorize(request):\n \"\"\"View implementing OIDC token authorization.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n logger.debug(\"Attempting to authorize OAuth access token\")\n token = oauth_client.authorize_access_token(request)\n\n if token is None:\n logger.warning(\"Could not authorize OAuth access token\")\n return redirect(ROUTE_START)\n\n logger.debug(\"OAuth access token authorized\")\n\n # We store the id_token in the user's session. This is the minimal amount of information needed later to log the user out.\n id_token = token[\"id_token\"]\n\n # We store the returned claim in case it can be used later in eligibility verification.\n verifier_claim = verifier.auth_provider.claim\n stored_claim = None\n\n if verifier_claim:\n userinfo = token.get(\"userinfo\")\n\n if userinfo:\n claim_value = userinfo.get(verifier_claim)\n # the claim comes back in userinfo like { \"claim\": \"True\" | \"False\" }\n if claim_value is None:\n logger.warning(f\"userinfo did not contain: {verifier_claim}\")\n elif claim_value.lower() == \"true\":\n # if userinfo contains our claim and the flag is true, store the *claim*\n stored_claim = verifier_claim\n\n session.update(request, oauth_token=id_token, oauth_claim=stored_claim)\n\n analytics.finished_sign_in(request)\n\n return redirect(ROUTE_CONFIRM)\n\n\n@decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)\ndef cancel(request):\n \"\"\"View implementing cancellation of OIDC authorization.\"\"\"\n\n analytics.canceled_sign_in(request)\n\n return redirect(ROUTE_UNVERIFIED)\n\n\n@decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)\ndef logout(request):\n \"\"\"View implementing OIDC and application sign out.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n analytics.started_sign_out(request)\n\n # overwrite the oauth session token, the user is signed out of the app\n token = session.oauth_token(request)\n session.logout(request)\n\n route = reverse(ROUTE_POST_LOGOUT)\n redirect_uri = redirects.generate_redirect_uri(request, route)\n\n 
logger.debug(f\"OAuth end_session_endpoint with redirect_uri: {redirect_uri}\")\n\n # send the user through the end_session_endpoint, redirecting back to\n # the post_logout route\n return redirects.deauthorize_redirect(oauth_client, token, redirect_uri)\n\n\n@decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)\ndef post_logout(request):\n \"\"\"View routes the user to their origin after sign out.\"\"\"\n\n analytics.finished_sign_out(request)\n\n origin = session.origin(request)\n return redirect(origin)\n", "path": "benefits/oauth/views.py"}]}
2,098
231
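The benefits diff above parses the claim with `int(claim_value) if claim_value else None` and treats `1` as eligible, which satisfies the acceptance criterion while leaving error-code handling to the follow-up issue (#2049). The helper below is a standalone sketch of the full convention described in the issue (`"0"` false, `"1"` true, `>= 10` error code); it is not the project's code, and the function name and return shape are invented for illustration:

```python
from typing import Optional, Tuple


def parse_claim(raw: Optional[str]) -> Tuple[bool, Optional[int]]:
    """Return (eligible, error_code) for an IdG integer-style claim string."""
    if raw is None:
        return False, None
    try:
        value = int(raw)
    except (TypeError, ValueError):
        return False, None
    if value == 1:
        return True, None
    if value >= 10:
        return False, value  # error code from the identity gateway
    return False, None


assert parse_claim("1") == (True, None)
assert parse_claim("0") == (False, None)
assert parse_claim("10") == (False, 10)
assert parse_claim(None) == (False, None)
```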
gh_patches_debug_6673
rasdani/github-patches
git_diff
freqtrade__freqtrade-2884
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Strange results for download-data I recalled #2716, tried to reproduce and obtained very strange results: * remove config.json * then run: ``` $ freqtrade download-data --exchange gemini --pairs 'BTC/USD' 'LTC/USD' 'BCH/USD' 'ETH/USD' -t 1h --days 5 -v 2020-02-08 03:48:33,819 - freqtrade.loggers - INFO - Verbosity set to 1 2020-02-08 03:48:33,819 - freqtrade.configuration.configuration - INFO - Using exchange gemini 2020-02-08 03:48:33,819 - freqtrade.configuration.configuration - INFO - Using user-data directory: /home/user/freqtrade-wrk/github-hroff-1902/freqtrade/user_data ... 2020-02-08 03:48:33,819 - freqtrade.configuration.configuration - INFO - Using data directory: /home/user/freqtrade-wrk/github-hroff-1902/freqtrade/user_data/data/gemini ... 2020-02-08 03:48:33,819 - freqtrade.configuration.configuration - INFO - Using pairs ['BTC/USD', 'LTC/USD', 'BCH/USD', 'ETH/USD'] 2020-02-08 03:48:33,820 - freqtrade.configuration.configuration - INFO - timeframes --timeframes: ['1h'] 2020-02-08 03:48:33,820 - freqtrade.configuration.configuration - INFO - Detected --days: 5 2020-02-08 03:48:33,820 - freqtrade.configuration.check_exchange - INFO - Checking exchange... 2020-02-08 03:48:33,820 - freqtrade.configuration.check_exchange - WARNING - Exchange "gemini" is known to the the ccxt library, available for the bot, but not officially supported by the Freqtrade development team. It may work flawlessly (please report back) or have serious issues. Use it at your own discretion. 2020-02-08 03:48:33,820 - freqtrade.configuration.deprecated_settings - WARNING - DEPRECATED: Pairlists must be defined explicitly in the future.Defaulting to StaticPairList for now. 2020-02-08 03:48:33,820 - freqtrade.configuration.config_validation - INFO - Validating configuration ... 2020-02-08 03:48:33,824 - freqtrade.commands.data_commands - INFO - About to download pairs: ['BTC/USD', 'LTC/USD', 'BCH/USD', 'ETH/USD'], intervals: ['1h'] to /home/user/freqtrade-wrk/github-hroff-1902/freqtrade/user_data/data/gemini 2020-02-08 03:48:33,824 - freqtrade.resolvers.exchange_resolver - INFO - No Gemini specific subclass found. Using the generic class instead. 
2020-02-08 03:48:33,824 - freqtrade.exchange.exchange - INFO - Instance is running with dry_run enabled 2020-02-08 03:48:33,826 - freqtrade.exchange.exchange - INFO - Applying additional ccxt config: {'enableRateLimit': True} 2020-02-08 03:48:33,826 - asyncio - DEBUG - Using selector: EpollSelector 2020-02-08 03:48:33,828 - freqtrade.exchange.exchange - INFO - Using Exchange "Gemini" 2020-02-08 03:48:35,437 - chardet.charsetprober - DEBUG - utf-8 confidence = 0.99 2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - SHIFT_JIS Japanese confidence = 0.01 2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - EUC-JP Japanese confidence = 0.01 2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - GB2312 Chinese confidence = 0.01 2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - EUC-KR Korean confidence = 0.01 2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - CP949 Korean confidence = 0.01 2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - Big5 Chinese confidence = 0.01 2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - EUC-TW Taiwan confidence = 0.01 2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - windows-1251 Russian confidence = 0.01 2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - KOI8-R Russian confidence = 0.01 2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - ISO-8859-5 Russian confidence = 0.01 2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - MacCyrillic Russian confidence = 0.0 2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - IBM866 Russian confidence = 0.0 2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - IBM855 Russian confidence = 0.01 2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - ISO-8859-7 Greek confidence = 0.0 2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - windows-1253 Greek confidence = 0.0 2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - ISO-8859-5 Bulgairan confidence = 0.01 2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - windows-1251 Bulgarian confidence = 0.01 2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - TIS-620 Thai confidence = 0.0 2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - ISO-8859-9 Turkish confidence = 0.6551158277756192 2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - windows-1255 Hebrew confidence = 0.0 2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - windows-1255 Hebrew confidence = 0.0 2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - windows-1255 Hebrew confidence = 0.0 2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - utf-8 confidence = 0.99 2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - SHIFT_JIS Japanese confidence = 0.01 2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - EUC-JP Japanese confidence = 0.01 2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - GB2312 Chinese confidence = 0.01 2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - EUC-KR Korean confidence = 0.01 2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - CP949 Korean confidence = 0.01 2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - Big5 Chinese confidence = 0.01 2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - EUC-TW Taiwan confidence = 0.01 2020-02-08 03:48:35,441 - freqtrade - ERROR - is not available as stake on Gemini. 
Available currencies are: BCH, BTC, ETH, LTC, USD 2020-02-08 03:48:35,441 - freqtrade.exchange.exchange - DEBUG - Exchange object destroyed, closing async loop ``` -- note 1) strange messages from "chardet.charsetprober" module; 2) empty "something" between "ERROR - is not available". 3) strange module "freqtrade" for this message... download-data validates whitelist when --pairs is used The download-data subcommands validates whitelist when `--pairs` is used: ``` $ freqtrade download-data -c config.json --exchange kraken --pairs BTC/USD -t 1d --days 100 -v ... 2019-12-17 20:07:49,885 - freqtrade - ERROR - Pair NXT/BTC is not available on Kraken. Please remove NXT/BTC from your whitelist. ``` Expected behavior: ignore pair_whitelist, work only with pairs set in the cli. </issue> <code> [start of freqtrade/commands/data_commands.py] 1 import logging 2 import sys 3 from typing import Any, Dict, List 4 5 import arrow 6 7 from freqtrade.configuration import TimeRange, setup_utils_configuration 8 from freqtrade.data.history import (convert_trades_to_ohlcv, 9 refresh_backtest_ohlcv_data, 10 refresh_backtest_trades_data) 11 from freqtrade.exceptions import OperationalException 12 from freqtrade.resolvers import ExchangeResolver 13 from freqtrade.state import RunMode 14 15 logger = logging.getLogger(__name__) 16 17 18 def start_download_data(args: Dict[str, Any]) -> None: 19 """ 20 Download data (former download_backtest_data.py script) 21 """ 22 config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE) 23 24 timerange = TimeRange() 25 if 'days' in config: 26 time_since = arrow.utcnow().shift(days=-config['days']).strftime("%Y%m%d") 27 timerange = TimeRange.parse_timerange(f'{time_since}-') 28 29 if 'pairs' not in config: 30 raise OperationalException( 31 "Downloading data requires a list of pairs. " 32 "Please check the documentation on how to configure this.") 33 34 logger.info(f'About to download pairs: {config["pairs"]}, ' 35 f'intervals: {config["timeframes"]} to {config["datadir"]}') 36 37 pairs_not_available: List[str] = [] 38 39 # Init exchange 40 exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config) 41 try: 42 43 if config.get('download_trades'): 44 pairs_not_available = refresh_backtest_trades_data( 45 exchange, pairs=config["pairs"], datadir=config['datadir'], 46 timerange=timerange, erase=bool(config.get("erase"))) 47 48 # Convert downloaded trade data to different timeframes 49 convert_trades_to_ohlcv( 50 pairs=config["pairs"], timeframes=config["timeframes"], 51 datadir=config['datadir'], timerange=timerange, 52 erase=bool(config.get("erase"))) 53 else: 54 pairs_not_available = refresh_backtest_ohlcv_data( 55 exchange, pairs=config["pairs"], timeframes=config["timeframes"], 56 datadir=config['datadir'], timerange=timerange, 57 erase=bool(config.get("erase"))) 58 59 except KeyboardInterrupt: 60 sys.exit("SIGINT received, aborting ...") 61 62 finally: 63 if pairs_not_available: 64 logger.info(f"Pairs [{','.join(pairs_not_available)}] not available " 65 f"on exchange {exchange.name}.") 66 [end of freqtrade/commands/data_commands.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/freqtrade/commands/data_commands.py b/freqtrade/commands/data_commands.py --- a/freqtrade/commands/data_commands.py +++ b/freqtrade/commands/data_commands.py @@ -37,7 +37,12 @@ pairs_not_available: List[str] = [] # Init exchange - exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config) + exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False) + # Manual validations of relevant settings + exchange.validate_pairs(config['pairs']) + for timeframe in config['timeframes']: + exchange.validate_timeframes(timeframe) + try: if config.get('download_trades'):
{"golden_diff": "diff --git a/freqtrade/commands/data_commands.py b/freqtrade/commands/data_commands.py\n--- a/freqtrade/commands/data_commands.py\n+++ b/freqtrade/commands/data_commands.py\n@@ -37,7 +37,12 @@\n pairs_not_available: List[str] = []\n \n # Init exchange\n- exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config)\n+ exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)\n+ # Manual validations of relevant settings\n+ exchange.validate_pairs(config['pairs'])\n+ for timeframe in config['timeframes']:\n+ exchange.validate_timeframes(timeframe)\n+\n try:\n \n if config.get('download_trades'):\n", "issue": "Strange results for download-data\nI recalled #2716, tried to reproduce and obtained very strange results:\r\n\r\n* remove config.json\r\n* then run:\r\n```\r\n$ freqtrade download-data --exchange gemini --pairs 'BTC/USD' 'LTC/USD' 'BCH/USD' 'ETH/USD' -t 1h --days 5 -v\r\n2020-02-08 03:48:33,819 - freqtrade.loggers - INFO - Verbosity set to 1\r\n2020-02-08 03:48:33,819 - freqtrade.configuration.configuration - INFO - Using exchange gemini\r\n2020-02-08 03:48:33,819 - freqtrade.configuration.configuration - INFO - Using user-data directory: /home/user/freqtrade-wrk/github-hroff-1902/freqtrade/user_data ...\r\n2020-02-08 03:48:33,819 - freqtrade.configuration.configuration - INFO - Using data directory: /home/user/freqtrade-wrk/github-hroff-1902/freqtrade/user_data/data/gemini ...\r\n2020-02-08 03:48:33,819 - freqtrade.configuration.configuration - INFO - Using pairs ['BTC/USD', 'LTC/USD', 'BCH/USD', 'ETH/USD']\r\n2020-02-08 03:48:33,820 - freqtrade.configuration.configuration - INFO - timeframes --timeframes: ['1h']\r\n2020-02-08 03:48:33,820 - freqtrade.configuration.configuration - INFO - Detected --days: 5\r\n2020-02-08 03:48:33,820 - freqtrade.configuration.check_exchange - INFO - Checking exchange...\r\n2020-02-08 03:48:33,820 - freqtrade.configuration.check_exchange - WARNING - Exchange \"gemini\" is known to the the ccxt library, available for the bot, but not officially supported by the Freqtrade development team. It may work flawlessly (please report back) or have serious issues. Use it at your own discretion.\r\n2020-02-08 03:48:33,820 - freqtrade.configuration.deprecated_settings - WARNING - DEPRECATED: Pairlists must be defined explicitly in the future.Defaulting to StaticPairList for now.\r\n2020-02-08 03:48:33,820 - freqtrade.configuration.config_validation - INFO - Validating configuration ...\r\n2020-02-08 03:48:33,824 - freqtrade.commands.data_commands - INFO - About to download pairs: ['BTC/USD', 'LTC/USD', 'BCH/USD', 'ETH/USD'], intervals: ['1h'] to /home/user/freqtrade-wrk/github-hroff-1902/freqtrade/user_data/data/gemini\r\n2020-02-08 03:48:33,824 - freqtrade.resolvers.exchange_resolver - INFO - No Gemini specific subclass found. 
Using the generic class instead.\r\n2020-02-08 03:48:33,824 - freqtrade.exchange.exchange - INFO - Instance is running with dry_run enabled\r\n2020-02-08 03:48:33,826 - freqtrade.exchange.exchange - INFO - Applying additional ccxt config: {'enableRateLimit': True}\r\n2020-02-08 03:48:33,826 - asyncio - DEBUG - Using selector: EpollSelector\r\n2020-02-08 03:48:33,828 - freqtrade.exchange.exchange - INFO - Using Exchange \"Gemini\"\r\n2020-02-08 03:48:35,437 - chardet.charsetprober - DEBUG - utf-8 confidence = 0.99\r\n2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - SHIFT_JIS Japanese confidence = 0.01\r\n2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - EUC-JP Japanese confidence = 0.01\r\n2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - GB2312 Chinese confidence = 0.01\r\n2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - EUC-KR Korean confidence = 0.01\r\n2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - CP949 Korean confidence = 0.01\r\n2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - Big5 Chinese confidence = 0.01\r\n2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - EUC-TW Taiwan confidence = 0.01\r\n2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - windows-1251 Russian confidence = 0.01\r\n2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - KOI8-R Russian confidence = 0.01\r\n2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - ISO-8859-5 Russian confidence = 0.01\r\n2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - MacCyrillic Russian confidence = 0.0\r\n2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - IBM866 Russian confidence = 0.0\r\n2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - IBM855 Russian confidence = 0.01\r\n2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - ISO-8859-7 Greek confidence = 0.0\r\n2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - windows-1253 Greek confidence = 0.0\r\n2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - ISO-8859-5 Bulgairan confidence = 0.01\r\n2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - windows-1251 Bulgarian confidence = 0.01\r\n2020-02-08 03:48:35,438 - chardet.charsetprober - DEBUG - TIS-620 Thai confidence = 0.0\r\n2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - ISO-8859-9 Turkish confidence = 0.6551158277756192\r\n2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - windows-1255 Hebrew confidence = 0.0\r\n2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - windows-1255 Hebrew confidence = 0.0\r\n2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - windows-1255 Hebrew confidence = 0.0\r\n2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - utf-8 confidence = 0.99\r\n2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - SHIFT_JIS Japanese confidence = 0.01\r\n2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - EUC-JP Japanese confidence = 0.01\r\n2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - GB2312 Chinese confidence = 0.01\r\n2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - EUC-KR Korean confidence = 0.01\r\n2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - CP949 Korean confidence = 0.01\r\n2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - Big5 Chinese confidence = 0.01\r\n2020-02-08 03:48:35,439 - chardet.charsetprober - DEBUG - EUC-TW Taiwan confidence = 0.01\r\n2020-02-08 03:48:35,441 - freqtrade - ERROR - is not available as stake on Gemini. 
Available currencies are: BCH, BTC, ETH, LTC, USD\r\n2020-02-08 03:48:35,441 - freqtrade.exchange.exchange - DEBUG - Exchange object destroyed, closing async loop\r\n```\r\n\r\n-- note 1) strange messages from \"chardet.charsetprober\" module; 2) empty \"something\" between \"ERROR - is not available\". 3) strange module \"freqtrade\" for this message...\r\n\ndownload-data validates whitelist when --pairs is used\nThe download-data subcommands validates whitelist when `--pairs` is used:\r\n\r\n```\r\n$ freqtrade download-data -c config.json --exchange kraken --pairs BTC/USD -t 1d --days 100 -v\r\n...\r\n2019-12-17 20:07:49,885 - freqtrade - ERROR - Pair NXT/BTC is not available on Kraken. Please remove NXT/BTC from your whitelist.\r\n```\r\n\r\nExpected behavior: ignore pair_whitelist, work only with pairs set in the cli.\r\n\n", "before_files": [{"content": "import logging\nimport sys\nfrom typing import Any, Dict, List\n\nimport arrow\n\nfrom freqtrade.configuration import TimeRange, setup_utils_configuration\nfrom freqtrade.data.history import (convert_trades_to_ohlcv,\n refresh_backtest_ohlcv_data,\n refresh_backtest_trades_data)\nfrom freqtrade.exceptions import OperationalException\nfrom freqtrade.resolvers import ExchangeResolver\nfrom freqtrade.state import RunMode\n\nlogger = logging.getLogger(__name__)\n\n\ndef start_download_data(args: Dict[str, Any]) -> None:\n \"\"\"\n Download data (former download_backtest_data.py script)\n \"\"\"\n config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)\n\n timerange = TimeRange()\n if 'days' in config:\n time_since = arrow.utcnow().shift(days=-config['days']).strftime(\"%Y%m%d\")\n timerange = TimeRange.parse_timerange(f'{time_since}-')\n\n if 'pairs' not in config:\n raise OperationalException(\n \"Downloading data requires a list of pairs. \"\n \"Please check the documentation on how to configure this.\")\n\n logger.info(f'About to download pairs: {config[\"pairs\"]}, '\n f'intervals: {config[\"timeframes\"]} to {config[\"datadir\"]}')\n\n pairs_not_available: List[str] = []\n\n # Init exchange\n exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config)\n try:\n\n if config.get('download_trades'):\n pairs_not_available = refresh_backtest_trades_data(\n exchange, pairs=config[\"pairs\"], datadir=config['datadir'],\n timerange=timerange, erase=bool(config.get(\"erase\")))\n\n # Convert downloaded trade data to different timeframes\n convert_trades_to_ohlcv(\n pairs=config[\"pairs\"], timeframes=config[\"timeframes\"],\n datadir=config['datadir'], timerange=timerange,\n erase=bool(config.get(\"erase\")))\n else:\n pairs_not_available = refresh_backtest_ohlcv_data(\n exchange, pairs=config[\"pairs\"], timeframes=config[\"timeframes\"],\n datadir=config['datadir'], timerange=timerange,\n erase=bool(config.get(\"erase\")))\n\n except KeyboardInterrupt:\n sys.exit(\"SIGINT received, aborting ...\")\n\n finally:\n if pairs_not_available:\n logger.info(f\"Pairs [{','.join(pairs_not_available)}] not available \"\n f\"on exchange {exchange.name}.\")\n", "path": "freqtrade/commands/data_commands.py"}]}
3,824
159
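The accepted freqtrade fix above sidesteps whitelist validation by creating the exchange with `validate=False` and then validating only what the CLI supplied. A trimmed sketch of that flow inside `start_download_data()` (not runnable on its own; `config` is the dict produced by `setup_utils_configuration()` in the real command):

```python
from freqtrade.resolvers import ExchangeResolver

# Inside start_download_data(), after config has been built:
exchange = ExchangeResolver.load_exchange(
    config["exchange"]["name"], config, validate=False
)

# Validate only the pairs/timeframes given on the command line, so an
# unrelated pair_whitelist entry (e.g. NXT/BTC on Kraken) no longer aborts
# the download with a misleading error.
exchange.validate_pairs(config["pairs"])
for timeframe in config["timeframes"]:
    exchange.validate_timeframes(timeframe)
```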
gh_patches_debug_43093
rasdani/github-patches
git_diff
python-telegram-bot__python-telegram-bot-3261
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Suggestion] Add chat(s) parameter to ChatJoinRequestHandler This param should allow to filter out chats which will be handled by the ChatJoinRequestHandler, much like the pattern argument of the CallbackQueryHandler. It should allow "username" strings as well as ids and if set, the handler should check if the incoming update is from that chat. For first time contributors, check how CallbackQueryHandler implements the pattern argument in check_update: https://github.com/python-telegram-bot/python-telegram-bot/blob/master/telegram/ext/_callbackqueryhandler.py#L123 </issue> <code> [start of telegram/ext/_chatjoinrequesthandler.py] 1 #!/usr/bin/env python 2 # 3 # A library that provides a Python interface to the Telegram Bot API 4 # Copyright (C) 2015-2022 5 # Leandro Toledo de Souza <devs@python-telegram-bot.org> 6 # 7 # This program is free software: you can redistribute it and/or modify 8 # it under the terms of the GNU Lesser Public License as published by 9 # the Free Software Foundation, either version 3 of the License, or 10 # (at your option) any later version. 11 # 12 # This program is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU Lesser Public License for more details. 16 # 17 # You should have received a copy of the GNU Lesser Public License 18 # along with this program. If not, see [http://www.gnu.org/licenses/]. 19 """This module contains the ChatJoinRequestHandler class.""" 20 21 22 from telegram import Update 23 from telegram.ext._handler import BaseHandler 24 from telegram.ext._utils.types import CCT 25 26 27 class ChatJoinRequestHandler(BaseHandler[Update, CCT]): 28 """BaseHandler class to handle Telegram updates that contain 29 :attr:`telegram.Update.chat_join_request`. 30 31 Warning: 32 When setting :paramref:`block` to :obj:`False`, you cannot rely on adding custom 33 attributes to :class:`telegram.ext.CallbackContext`. See its docs for more info. 34 35 .. versionadded:: 13.8 36 37 Args: 38 callback (:term:`coroutine function`): The callback function for this handler. Will be 39 called when :meth:`check_update` has determined that an update should be processed by 40 this handler. Callback signature:: 41 42 async def callback(update: Update, context: CallbackContext) 43 44 The return value of the callback is usually ignored except for the special case of 45 :class:`telegram.ext.ConversationHandler`. 46 block (:obj:`bool`, optional): Determines whether the return value of the callback should 47 be awaited before processing the next handler in 48 :meth:`telegram.ext.Application.process_update`. Defaults to :obj:`True`. 49 50 Attributes: 51 callback (:term:`coroutine function`): The callback function for this handler. 52 block (:obj:`bool`): Determines whether the callback will run in a blocking way.. 53 54 """ 55 56 __slots__ = () 57 58 def check_update(self, update: object) -> bool: 59 """Determines whether an update should be passed to this handler's :attr:`callback`. 60 61 Args: 62 update (:class:`telegram.Update` | :obj:`object`): Incoming update. 
63 64 Returns: 65 :obj:`bool` 66 67 """ 68 return isinstance(update, Update) and bool(update.chat_join_request) 69 [end of telegram/ext/_chatjoinrequesthandler.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/telegram/ext/_chatjoinrequesthandler.py b/telegram/ext/_chatjoinrequesthandler.py --- a/telegram/ext/_chatjoinrequesthandler.py +++ b/telegram/ext/_chatjoinrequesthandler.py @@ -18,16 +18,27 @@ # along with this program. If not, see [http://www.gnu.org/licenses/]. """This module contains the ChatJoinRequestHandler class.""" +from typing import FrozenSet, Optional from telegram import Update +from telegram._utils.defaultvalue import DEFAULT_TRUE +from telegram._utils.types import RT, SCT, DVInput from telegram.ext._handler import BaseHandler -from telegram.ext._utils.types import CCT +from telegram.ext._utils.types import CCT, HandlerCallback class ChatJoinRequestHandler(BaseHandler[Update, CCT]): """BaseHandler class to handle Telegram updates that contain :attr:`telegram.Update.chat_join_request`. + Note: + If neither of :paramref:`username` and the :paramref:`chat_id` are passed, this handler + accepts *any* join request. Otherwise, this handler accepts all requests to join chats + for which the chat ID is listed in :paramref:`chat_id` or the username is listed in + :paramref:`username`, or both. + + .. versionadded:: 20.0 + Warning: When setting :paramref:`block` to :obj:`False`, you cannot rely on adding custom attributes to :class:`telegram.ext.CallbackContext`. See its docs for more info. @@ -43,6 +54,14 @@ The return value of the callback is usually ignored except for the special case of :class:`telegram.ext.ConversationHandler`. + chat_id (:obj:`int` | Collection[:obj:`int`], optional): Filters requests to allow only + those which are asking to join the specified chat ID(s). + + .. versionadded:: 20.0 + username (:obj:`str` | Collection[:obj:`str`], optional): Filters requests to allow only + those which are asking to join the specified username(s). + + .. versionadded:: 20.0 block (:obj:`bool`, optional): Determines whether the return value of the callback should be awaited before processing the next handler in :meth:`telegram.ext.Application.process_update`. Defaults to :obj:`True`. @@ -53,7 +72,38 @@ """ - __slots__ = () + __slots__ = ( + "_chat_ids", + "_usernames", + ) + + def __init__( + self, + callback: HandlerCallback[Update, CCT, RT], + chat_id: SCT[int] = None, + username: SCT[str] = None, + block: DVInput[bool] = DEFAULT_TRUE, + ): + super().__init__(callback, block=block) + + self._chat_ids = self._parse_chat_id(chat_id) + self._usernames = self._parse_username(username) + + @staticmethod + def _parse_chat_id(chat_id: Optional[SCT[int]]) -> FrozenSet[int]: + if chat_id is None: + return frozenset() + if isinstance(chat_id, int): + return frozenset({chat_id}) + return frozenset(chat_id) + + @staticmethod + def _parse_username(username: Optional[SCT[str]]) -> FrozenSet[str]: + if username is None: + return frozenset() + if isinstance(username, str): + return frozenset({username[1:] if username.startswith("@") else username}) + return frozenset({usr[1:] if usr.startswith("@") else usr for usr in username}) def check_update(self, update: object) -> bool: """Determines whether an update should be passed to this handler's :attr:`callback`. @@ -65,4 +115,12 @@ :obj:`bool` """ - return isinstance(update, Update) and bool(update.chat_join_request) + if isinstance(update, Update) and update.chat_join_request: + if not self._chat_ids and not self._usernames: + return True + if update.chat_join_request.chat.id in self._chat_ids: + return True + if update.chat_join_request.from_user.username in self._usernames: + return True + return False + return False
{"golden_diff": "diff --git a/telegram/ext/_chatjoinrequesthandler.py b/telegram/ext/_chatjoinrequesthandler.py\n--- a/telegram/ext/_chatjoinrequesthandler.py\n+++ b/telegram/ext/_chatjoinrequesthandler.py\n@@ -18,16 +18,27 @@\n # along with this program. If not, see [http://www.gnu.org/licenses/].\n \"\"\"This module contains the ChatJoinRequestHandler class.\"\"\"\n \n+from typing import FrozenSet, Optional\n \n from telegram import Update\n+from telegram._utils.defaultvalue import DEFAULT_TRUE\n+from telegram._utils.types import RT, SCT, DVInput\n from telegram.ext._handler import BaseHandler\n-from telegram.ext._utils.types import CCT\n+from telegram.ext._utils.types import CCT, HandlerCallback\n \n \n class ChatJoinRequestHandler(BaseHandler[Update, CCT]):\n \"\"\"BaseHandler class to handle Telegram updates that contain\n :attr:`telegram.Update.chat_join_request`.\n \n+ Note:\n+ If neither of :paramref:`username` and the :paramref:`chat_id` are passed, this handler\n+ accepts *any* join request. Otherwise, this handler accepts all requests to join chats\n+ for which the chat ID is listed in :paramref:`chat_id` or the username is listed in\n+ :paramref:`username`, or both.\n+\n+ .. versionadded:: 20.0\n+\n Warning:\n When setting :paramref:`block` to :obj:`False`, you cannot rely on adding custom\n attributes to :class:`telegram.ext.CallbackContext`. See its docs for more info.\n@@ -43,6 +54,14 @@\n \n The return value of the callback is usually ignored except for the special case of\n :class:`telegram.ext.ConversationHandler`.\n+ chat_id (:obj:`int` | Collection[:obj:`int`], optional): Filters requests to allow only\n+ those which are asking to join the specified chat ID(s).\n+\n+ .. versionadded:: 20.0\n+ username (:obj:`str` | Collection[:obj:`str`], optional): Filters requests to allow only\n+ those which are asking to join the specified username(s).\n+\n+ .. versionadded:: 20.0\n block (:obj:`bool`, optional): Determines whether the return value of the callback should\n be awaited before processing the next handler in\n :meth:`telegram.ext.Application.process_update`. 
Defaults to :obj:`True`.\n@@ -53,7 +72,38 @@\n \n \"\"\"\n \n- __slots__ = ()\n+ __slots__ = (\n+ \"_chat_ids\",\n+ \"_usernames\",\n+ )\n+\n+ def __init__(\n+ self,\n+ callback: HandlerCallback[Update, CCT, RT],\n+ chat_id: SCT[int] = None,\n+ username: SCT[str] = None,\n+ block: DVInput[bool] = DEFAULT_TRUE,\n+ ):\n+ super().__init__(callback, block=block)\n+\n+ self._chat_ids = self._parse_chat_id(chat_id)\n+ self._usernames = self._parse_username(username)\n+\n+ @staticmethod\n+ def _parse_chat_id(chat_id: Optional[SCT[int]]) -> FrozenSet[int]:\n+ if chat_id is None:\n+ return frozenset()\n+ if isinstance(chat_id, int):\n+ return frozenset({chat_id})\n+ return frozenset(chat_id)\n+\n+ @staticmethod\n+ def _parse_username(username: Optional[SCT[str]]) -> FrozenSet[str]:\n+ if username is None:\n+ return frozenset()\n+ if isinstance(username, str):\n+ return frozenset({username[1:] if username.startswith(\"@\") else username})\n+ return frozenset({usr[1:] if usr.startswith(\"@\") else usr for usr in username})\n \n def check_update(self, update: object) -> bool:\n \"\"\"Determines whether an update should be passed to this handler's :attr:`callback`.\n@@ -65,4 +115,12 @@\n :obj:`bool`\n \n \"\"\"\n- return isinstance(update, Update) and bool(update.chat_join_request)\n+ if isinstance(update, Update) and update.chat_join_request:\n+ if not self._chat_ids and not self._usernames:\n+ return True\n+ if update.chat_join_request.chat.id in self._chat_ids:\n+ return True\n+ if update.chat_join_request.from_user.username in self._usernames:\n+ return True\n+ return False\n+ return False\n", "issue": "[Suggestion] Add chat(s) parameter to ChatJoinRequestHandler\nThis param should allow to filter out chats which will be handled by the ChatJoinRequestHandler, much like the pattern argument of the CallbackQueryHandler. It should allow \"username\" strings as well as ids and if set, the handler should check if the incoming update is from that chat.\r\n\r\nFor first time contributors, check how CallbackQueryHandler implements the pattern argument in check_update: https://github.com/python-telegram-bot/python-telegram-bot/blob/master/telegram/ext/_callbackqueryhandler.py#L123\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2022\n# Leandro Toledo de Souza <devs@python-telegram-bot.org>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. 
If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the ChatJoinRequestHandler class.\"\"\"\n\n\nfrom telegram import Update\nfrom telegram.ext._handler import BaseHandler\nfrom telegram.ext._utils.types import CCT\n\n\nclass ChatJoinRequestHandler(BaseHandler[Update, CCT]):\n \"\"\"BaseHandler class to handle Telegram updates that contain\n :attr:`telegram.Update.chat_join_request`.\n\n Warning:\n When setting :paramref:`block` to :obj:`False`, you cannot rely on adding custom\n attributes to :class:`telegram.ext.CallbackContext`. See its docs for more info.\n\n .. versionadded:: 13.8\n\n Args:\n callback (:term:`coroutine function`): The callback function for this handler. Will be\n called when :meth:`check_update` has determined that an update should be processed by\n this handler. Callback signature::\n\n async def callback(update: Update, context: CallbackContext)\n\n The return value of the callback is usually ignored except for the special case of\n :class:`telegram.ext.ConversationHandler`.\n block (:obj:`bool`, optional): Determines whether the return value of the callback should\n be awaited before processing the next handler in\n :meth:`telegram.ext.Application.process_update`. Defaults to :obj:`True`.\n\n Attributes:\n callback (:term:`coroutine function`): The callback function for this handler.\n block (:obj:`bool`): Determines whether the callback will run in a blocking way..\n\n \"\"\"\n\n __slots__ = ()\n\n def check_update(self, update: object) -> bool:\n \"\"\"Determines whether an update should be passed to this handler's :attr:`callback`.\n\n Args:\n update (:class:`telegram.Update` | :obj:`object`): Incoming update.\n\n Returns:\n :obj:`bool`\n\n \"\"\"\n return isinstance(update, Update) and bool(update.chat_join_request)\n", "path": "telegram/ext/_chatjoinrequesthandler.py"}]}
1,382
987
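The ChatJoinRequestHandler record above ends here; its golden diff adds optional `chat_id` and `username` filters to the handler. A minimal usage sketch of that patched API follows — the bot token, chat ID and channel username are placeholder assumptions, not values taken from the dataset:

```
from telegram import Update
from telegram.ext import Application, ChatJoinRequestHandler, ContextTypes


async def on_join_request(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    # Only requests for the chats configured below ever reach this callback.
    await update.chat_join_request.approve()


application = Application.builder().token("PLACEHOLDER_TOKEN").build()
application.add_handler(
    ChatJoinRequestHandler(
        on_join_request,
        chat_id=-1001234567890,        # hypothetical chat ID
        username="@example_channel",   # hypothetical username; the patch strips the leading "@"
    )
)
application.run_polling()
```

Passing neither filter keeps the pre-patch behaviour of accepting every join request, which matches the note the diff adds to the class docstring.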
gh_patches_debug_8876
rasdani/github-patches
git_diff
microsoft__MLOS-211
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Sphinx Python API docs generation broken in recent nightly CI runs For example: <https://github.com/microsoft/MLOS/runs/1635132574?check_suite_focus=true> </issue> <code> [start of source/Mlos.Python/mlos/Spaces/Point.py] 1 # 2 # Copyright (c) Microsoft Corporation. 3 # Licensed under the MIT License. 4 # 5 import json 6 from numbers import Number 7 8 import pandas as pd 9 from mlos.Spaces.Dimensions.Dimension import Dimension 10 11 12 class Point: 13 """ Models a point in a Hypergrid. 14 15 """ 16 def __init__(self, **kwargs): 17 self.dimension_value_dict = dict() 18 for dimension_name, value in kwargs.items(): 19 self[dimension_name] = value 20 21 def copy(self): 22 return Point(**{key: value for key, value in self}) 23 24 def flat_copy(self): 25 """ Creates a copy of the point but all dimension names are flattened. 26 27 :return: 28 """ 29 flat_dict = { 30 Dimension.flatten_dimension_name(dimension_name): value 31 for dimension_name, value in self 32 } 33 return Point(**flat_dict) 34 35 def __eq__(self, other): 36 if not isinstance(other, Point): 37 return False 38 return \ 39 all(other.get(dimension_name, None) == value for dimension_name, value in self) \ 40 and \ 41 all(self.get(dimension_name, None) == value for dimension_name, value in other) 42 43 def __ne__(self, other): 44 return not self == other 45 46 def __iter__(self): 47 for dimension_name, value in self.dimension_value_dict.items(): 48 if not isinstance(value, Point): 49 yield dimension_name, value 50 else: 51 for sub_dimension_name, sub_dimension_value in value: 52 yield dimension_name + "." + sub_dimension_name, sub_dimension_value 53 54 def __getattr__(self, dimension_name): 55 if dimension_name == "__isabstractmethod__": 56 # A sad but necessary way to deal with ABC. 
57 return False 58 return self[dimension_name] 59 60 def __setattr__(self, name, value): 61 if name == "dimension_value_dict": 62 self.__dict__[name] = value 63 else: 64 dimension_name = name 65 subgrid_name, dimension_name_without_subgrid_name = Dimension.split_dimension_name(dimension_name) 66 if subgrid_name is None: 67 self.dimension_value_dict[dimension_name] = value 68 else: 69 point_in_subgrid = self.dimension_value_dict.get(subgrid_name, Point()) 70 point_in_subgrid[dimension_name_without_subgrid_name] = value 71 self.dimension_value_dict[subgrid_name] = point_in_subgrid 72 73 def __getitem__(self, dimension_name): 74 if dimension_name not in self: 75 raise KeyError(f"This Point does not have a value along dimension: {dimension_name}") 76 subgrid_name, dimension_name_without_subgrid_name = Dimension.split_dimension_name(dimension_name) 77 if subgrid_name is None: 78 return self.dimension_value_dict[dimension_name] 79 return self[subgrid_name][dimension_name_without_subgrid_name] 80 81 def get(self, dimension_name, default=None): 82 try: 83 return self[dimension_name] 84 except KeyError: 85 return default 86 87 def __setitem__(self, dimension_name, value): 88 subgrid_name, dimension_name_without_subgrid_name = Dimension.split_dimension_name(dimension_name) 89 if subgrid_name is None: 90 self.dimension_value_dict[dimension_name] = value 91 else: 92 point_in_subgrid = self.dimension_value_dict.get(subgrid_name, Point()) 93 point_in_subgrid[dimension_name_without_subgrid_name] = value 94 self.dimension_value_dict[subgrid_name] = point_in_subgrid 95 96 def __contains__(self, dimension_name): 97 subgrid_name, dimension_name_without_subgrid_name = Dimension.split_dimension_name(dimension_name) 98 if subgrid_name is None: 99 return dimension_name in self.dimension_value_dict 100 if subgrid_name not in self.dimension_value_dict: 101 return False 102 return dimension_name_without_subgrid_name in self[subgrid_name] 103 104 def __repr__(self): 105 return self.__str__() 106 107 def __str__(self): 108 return str(self.to_json(indent=2)) 109 110 def __getstate__(self): 111 return self.to_json() 112 113 def __setstate__(self, state): 114 temp_point = self.from_json(state) 115 self.dimension_value_dict = temp_point.dimension_value_dict 116 117 def to_json(self, indent=None): 118 if indent is not None: 119 return json.dumps(self.to_dict(), indent=indent) 120 return json.dumps(self.to_dict()) 121 122 @classmethod 123 def from_json(cls, json_str): 124 coordinates = json.loads(json_str) 125 return Point(**coordinates) 126 127 def to_dict(self): 128 return_dict = {} 129 for param_name, value in self: 130 if isinstance(value, Number) and int(value) == value and not isinstance(value, bool): 131 value = int(value) 132 return_dict[param_name] = value 133 return return_dict 134 135 def to_dataframe(self): 136 return pd.DataFrame({param_name: [value] for param_name, value in self}) 137 138 @classmethod 139 def from_dataframe(cls, dataframe: pd.DataFrame): 140 assert len(dataframe.index) == 1 141 dataframe = dataframe.dropna(axis=1) 142 dataframe_dict = dataframe.to_dict(orient='list') 143 point_dict = {key: values[0] for key, values in dataframe_dict.items()} 144 return Point(**point_dict) 145 [end of source/Mlos.Python/mlos/Spaces/Point.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/source/Mlos.Python/mlos/Spaces/Point.py b/source/Mlos.Python/mlos/Spaces/Point.py --- a/source/Mlos.Python/mlos/Spaces/Point.py +++ b/source/Mlos.Python/mlos/Spaces/Point.py @@ -55,7 +55,10 @@ if dimension_name == "__isabstractmethod__": # A sad but necessary way to deal with ABC. return False - return self[dimension_name] + try: + return self[dimension_name] + except KeyError: + raise AttributeError(f"This Point does not have a {dimension_name} attribute.") def __setattr__(self, name, value): if name == "dimension_value_dict":
{"golden_diff": "diff --git a/source/Mlos.Python/mlos/Spaces/Point.py b/source/Mlos.Python/mlos/Spaces/Point.py\n--- a/source/Mlos.Python/mlos/Spaces/Point.py\n+++ b/source/Mlos.Python/mlos/Spaces/Point.py\n@@ -55,7 +55,10 @@\n if dimension_name == \"__isabstractmethod__\":\r\n # A sad but necessary way to deal with ABC.\r\n return False\r\n- return self[dimension_name]\r\n+ try:\r\n+ return self[dimension_name]\r\n+ except KeyError:\r\n+ raise AttributeError(f\"This Point does not have a {dimension_name} attribute.\")\r\n \r\n def __setattr__(self, name, value):\r\n if name == \"dimension_value_dict\":\n", "issue": "Sphinx Python API docs generation broken in recent nightly CI runs\nFor example: <https://github.com/microsoft/MLOS/runs/1635132574?check_suite_focus=true>\n", "before_files": [{"content": "#\r\n# Copyright (c) Microsoft Corporation.\r\n# Licensed under the MIT License.\r\n#\r\nimport json\r\nfrom numbers import Number\r\n\r\nimport pandas as pd\r\nfrom mlos.Spaces.Dimensions.Dimension import Dimension\r\n\r\n\r\nclass Point:\r\n \"\"\" Models a point in a Hypergrid.\r\n\r\n \"\"\"\r\n def __init__(self, **kwargs):\r\n self.dimension_value_dict = dict()\r\n for dimension_name, value in kwargs.items():\r\n self[dimension_name] = value\r\n\r\n def copy(self):\r\n return Point(**{key: value for key, value in self})\r\n\r\n def flat_copy(self):\r\n \"\"\" Creates a copy of the point but all dimension names are flattened.\r\n\r\n :return:\r\n \"\"\"\r\n flat_dict = {\r\n Dimension.flatten_dimension_name(dimension_name): value\r\n for dimension_name, value in self\r\n }\r\n return Point(**flat_dict)\r\n\r\n def __eq__(self, other):\r\n if not isinstance(other, Point):\r\n return False\r\n return \\\r\n all(other.get(dimension_name, None) == value for dimension_name, value in self) \\\r\n and \\\r\n all(self.get(dimension_name, None) == value for dimension_name, value in other)\r\n\r\n def __ne__(self, other):\r\n return not self == other\r\n\r\n def __iter__(self):\r\n for dimension_name, value in self.dimension_value_dict.items():\r\n if not isinstance(value, Point):\r\n yield dimension_name, value\r\n else:\r\n for sub_dimension_name, sub_dimension_value in value:\r\n yield dimension_name + \".\" + sub_dimension_name, sub_dimension_value\r\n\r\n def __getattr__(self, dimension_name):\r\n if dimension_name == \"__isabstractmethod__\":\r\n # A sad but necessary way to deal with ABC.\r\n return False\r\n return self[dimension_name]\r\n\r\n def __setattr__(self, name, value):\r\n if name == \"dimension_value_dict\":\r\n self.__dict__[name] = value\r\n else:\r\n dimension_name = name\r\n subgrid_name, dimension_name_without_subgrid_name = Dimension.split_dimension_name(dimension_name)\r\n if subgrid_name is None:\r\n self.dimension_value_dict[dimension_name] = value\r\n else:\r\n point_in_subgrid = self.dimension_value_dict.get(subgrid_name, Point())\r\n point_in_subgrid[dimension_name_without_subgrid_name] = value\r\n self.dimension_value_dict[subgrid_name] = point_in_subgrid\r\n\r\n def __getitem__(self, dimension_name):\r\n if dimension_name not in self:\r\n raise KeyError(f\"This Point does not have a value along dimension: {dimension_name}\")\r\n subgrid_name, dimension_name_without_subgrid_name = Dimension.split_dimension_name(dimension_name)\r\n if subgrid_name is None:\r\n return self.dimension_value_dict[dimension_name]\r\n return self[subgrid_name][dimension_name_without_subgrid_name]\r\n\r\n def get(self, dimension_name, default=None):\r\n try:\r\n return 
self[dimension_name]\r\n except KeyError:\r\n return default\r\n\r\n def __setitem__(self, dimension_name, value):\r\n subgrid_name, dimension_name_without_subgrid_name = Dimension.split_dimension_name(dimension_name)\r\n if subgrid_name is None:\r\n self.dimension_value_dict[dimension_name] = value\r\n else:\r\n point_in_subgrid = self.dimension_value_dict.get(subgrid_name, Point())\r\n point_in_subgrid[dimension_name_without_subgrid_name] = value\r\n self.dimension_value_dict[subgrid_name] = point_in_subgrid\r\n\r\n def __contains__(self, dimension_name):\r\n subgrid_name, dimension_name_without_subgrid_name = Dimension.split_dimension_name(dimension_name)\r\n if subgrid_name is None:\r\n return dimension_name in self.dimension_value_dict\r\n if subgrid_name not in self.dimension_value_dict:\r\n return False\r\n return dimension_name_without_subgrid_name in self[subgrid_name]\r\n\r\n def __repr__(self):\r\n return self.__str__()\r\n\r\n def __str__(self):\r\n return str(self.to_json(indent=2))\r\n\r\n def __getstate__(self):\r\n return self.to_json()\r\n\r\n def __setstate__(self, state):\r\n temp_point = self.from_json(state)\r\n self.dimension_value_dict = temp_point.dimension_value_dict\r\n\r\n def to_json(self, indent=None):\r\n if indent is not None:\r\n return json.dumps(self.to_dict(), indent=indent)\r\n return json.dumps(self.to_dict())\r\n\r\n @classmethod\r\n def from_json(cls, json_str):\r\n coordinates = json.loads(json_str)\r\n return Point(**coordinates)\r\n\r\n def to_dict(self):\r\n return_dict = {}\r\n for param_name, value in self:\r\n if isinstance(value, Number) and int(value) == value and not isinstance(value, bool):\r\n value = int(value)\r\n return_dict[param_name] = value\r\n return return_dict\r\n\r\n def to_dataframe(self):\r\n return pd.DataFrame({param_name: [value] for param_name, value in self})\r\n\r\n @classmethod\r\n def from_dataframe(cls, dataframe: pd.DataFrame):\r\n assert len(dataframe.index) == 1\r\n dataframe = dataframe.dropna(axis=1)\r\n dataframe_dict = dataframe.to_dict(orient='list')\r\n point_dict = {key: values[0] for key, values in dataframe_dict.items()}\r\n return Point(**point_dict)\r\n", "path": "source/Mlos.Python/mlos/Spaces/Point.py"}]}
2,041
162
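The MLOS record above hinges on `Point.__getattr__` leaking a `KeyError`, which breaks tools such as Sphinx autodoc that probe attributes and only expect `AttributeError`. A standalone Python illustration of that behaviour — not code from the MLOS sources — is:

```
class Broken:
    def __getattr__(self, name):
        raise KeyError(name)  # wrong exception type for attribute lookup


class Fixed:
    def __getattr__(self, name):
        raise AttributeError(f"no attribute {name!r}")


print(hasattr(Fixed(), "missing"))   # False: hasattr() swallows AttributeError only
try:
    hasattr(Broken(), "missing")
except KeyError as exc:
    print(f"hasattr() blew up with {exc!r} instead of returning False")
```

The golden diff applies exactly this translation, wrapping the item lookup in `try/except KeyError` and re-raising `AttributeError`.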
gh_patches_debug_37529
rasdani/github-patches
git_diff
dmlc__dgl-5377
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Sparse] Support column-wise softmax. ## 🔨Work Item **IMPORTANT:** * This template is only for dev team to track project progress. For feature request or bug report, please use the corresponding issue templates. * DO NOT create a new work item if the purpose is to fix an existing issue or feature request. We will directly use the issue in the project tracker. Project tracker: https://github.com/orgs/dmlc/projects/2 ## Description <!-- short description of the work item --> ## Depending work items or issues <!-- what must be done before this --> </issue> <code> [start of python/dgl/sparse/softmax.py] 1 """Softmax op for SparseMatrix""" 2 # pylint: disable=invalid-name, W0622 3 4 import torch 5 6 from .sparse_matrix import SparseMatrix 7 8 __all__ = ["softmax"] 9 10 11 def softmax(input: SparseMatrix) -> SparseMatrix: 12 """Applies row-wise softmax to the non-zero elements of the sparse matrix. 13 14 Equivalently, applies softmax to the non-zero elements of the sparse 15 matrix along the column (``dim=1``) dimension. 16 17 If :attr:`input.val` takes shape ``(nnz, D)``, then the output matrix 18 :attr:`output` and :attr:`output.val` take the same shape as :attr:`input` 19 and :attr:`input.val`. :attr:`output.val[:, i]` is calculated based on 20 :attr:`input.val[:, i]`. 21 22 Parameters 23 ---------- 24 input : SparseMatrix 25 The input sparse matrix 26 27 Returns 28 ------- 29 SparseMatrix 30 The output sparse matrix 31 32 Examples 33 -------- 34 35 Case1: matrix with values of shape (nnz) 36 37 >>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]]) 38 >>> nnz = len(row) 39 >>> val = torch.arange(nnz).float() 40 >>> A = dglsp.spmatrix(indices, val) 41 >>> dglsp.softmax(A) 42 SparseMatrix(indices=tensor([[0, 0, 1, 2], 43 [1, 2, 2, 0]]), 44 values=tensor([0.2689, 0.7311, 1.0000, 1.0000]), 45 shape=(3, 3), nnz=4) 46 47 Case2: matrix with values of shape (nnz, D) 48 49 >>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]]) 50 >>> val = torch.tensor([[0., 7.], [1., 3.], [2., 2.], [3., 1.]]) 51 >>> A = dglsp.spmatrix(indices, val) 52 >>> dglsp.softmax(A) 53 SparseMatrix(indices=tensor([[0, 0, 1, 2], 54 [1, 2, 2, 0]]), 55 values=tensor([[0.2689, 0.9820], 56 [0.7311, 0.0180], 57 [1.0000, 1.0000], 58 [1.0000, 1.0000]]), 59 shape=(3, 3), nnz=4, val_size=(2,)) 60 """ 61 return SparseMatrix(torch.ops.dgl_sparse.softmax(input.c_sparse_matrix)) 62 63 64 SparseMatrix.softmax = softmax 65 [end of python/dgl/sparse/softmax.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/python/dgl/sparse/softmax.py b/python/dgl/sparse/softmax.py --- a/python/dgl/sparse/softmax.py +++ b/python/dgl/sparse/softmax.py @@ -8,11 +8,10 @@ __all__ = ["softmax"] -def softmax(input: SparseMatrix) -> SparseMatrix: - """Applies row-wise softmax to the non-zero elements of the sparse matrix. - - Equivalently, applies softmax to the non-zero elements of the sparse - matrix along the column (``dim=1``) dimension. +def softmax(input: SparseMatrix, dim: int = 1) -> SparseMatrix: + """Applies softmax to the non-zero elements of the sparse matrix on the + dimension :attr:``dim``. dim = 0 or 1 indicates column-wise or row-wise + softmax respectively. If :attr:`input.val` takes shape ``(nnz, D)``, then the output matrix :attr:`output` and :attr:`output.val` take the same shape as :attr:`input` @@ -32,11 +31,10 @@ Examples -------- - Case1: matrix with values of shape (nnz) + Case1: row-wise softmax on matrix with values of shape (nnz) >>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]]) - >>> nnz = len(row) - >>> val = torch.arange(nnz).float() + >>> val = torch.tensor([0., 1., 2., 3.]) >>> A = dglsp.spmatrix(indices, val) >>> dglsp.softmax(A) SparseMatrix(indices=tensor([[0, 0, 1, 2], @@ -44,7 +42,7 @@ values=tensor([0.2689, 0.7311, 1.0000, 1.0000]), shape=(3, 3), nnz=4) - Case2: matrix with values of shape (nnz, D) + Case2: row-wise softmax on matrix with values of shape (nnz, D) >>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]]) >>> val = torch.tensor([[0., 7.], [1., 3.], [2., 2.], [3., 1.]]) @@ -57,8 +55,21 @@ [1.0000, 1.0000], [1.0000, 1.0000]]), shape=(3, 3), nnz=4, val_size=(2,)) + + Case3: column-wise softmax on matrix with values of shape (nnz) + + >>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]]) + >>> val = torch.tensor([0., 1., 2., 3.]) + >>> A = dglsp.spmatrix(indices, val) + >>> dglsp.softmax(A, 0) + SparseMatrix(indices=tensor([[0, 0, 1, 2], + [1, 2, 2, 0]]), + values=tensor([1.0000, 0.2689, 0.7311, 1.0000]), + shape=(3, 3), nnz=4) """ - return SparseMatrix(torch.ops.dgl_sparse.softmax(input.c_sparse_matrix)) + return SparseMatrix( + torch.ops.dgl_sparse.softmax(input.c_sparse_matrix, dim) + ) SparseMatrix.softmax = softmax
{"golden_diff": "diff --git a/python/dgl/sparse/softmax.py b/python/dgl/sparse/softmax.py\n--- a/python/dgl/sparse/softmax.py\n+++ b/python/dgl/sparse/softmax.py\n@@ -8,11 +8,10 @@\n __all__ = [\"softmax\"]\n \n \n-def softmax(input: SparseMatrix) -> SparseMatrix:\n- \"\"\"Applies row-wise softmax to the non-zero elements of the sparse matrix.\n-\n- Equivalently, applies softmax to the non-zero elements of the sparse\n- matrix along the column (``dim=1``) dimension.\n+def softmax(input: SparseMatrix, dim: int = 1) -> SparseMatrix:\n+ \"\"\"Applies softmax to the non-zero elements of the sparse matrix on the\n+ dimension :attr:``dim``. dim = 0 or 1 indicates column-wise or row-wise\n+ softmax respectively.\n \n If :attr:`input.val` takes shape ``(nnz, D)``, then the output matrix\n :attr:`output` and :attr:`output.val` take the same shape as :attr:`input`\n@@ -32,11 +31,10 @@\n Examples\n --------\n \n- Case1: matrix with values of shape (nnz)\n+ Case1: row-wise softmax on matrix with values of shape (nnz)\n \n >>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])\n- >>> nnz = len(row)\n- >>> val = torch.arange(nnz).float()\n+ >>> val = torch.tensor([0., 1., 2., 3.])\n >>> A = dglsp.spmatrix(indices, val)\n >>> dglsp.softmax(A)\n SparseMatrix(indices=tensor([[0, 0, 1, 2],\n@@ -44,7 +42,7 @@\n values=tensor([0.2689, 0.7311, 1.0000, 1.0000]),\n shape=(3, 3), nnz=4)\n \n- Case2: matrix with values of shape (nnz, D)\n+ Case2: row-wise softmax on matrix with values of shape (nnz, D)\n \n >>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])\n >>> val = torch.tensor([[0., 7.], [1., 3.], [2., 2.], [3., 1.]])\n@@ -57,8 +55,21 @@\n [1.0000, 1.0000],\n [1.0000, 1.0000]]),\n shape=(3, 3), nnz=4, val_size=(2,))\n+\n+ Case3: column-wise softmax on matrix with values of shape (nnz)\n+\n+ >>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])\n+ >>> val = torch.tensor([0., 1., 2., 3.])\n+ >>> A = dglsp.spmatrix(indices, val)\n+ >>> dglsp.softmax(A, 0)\n+ SparseMatrix(indices=tensor([[0, 0, 1, 2],\n+ [1, 2, 2, 0]]),\n+ values=tensor([1.0000, 0.2689, 0.7311, 1.0000]),\n+ shape=(3, 3), nnz=4)\n \"\"\"\n- return SparseMatrix(torch.ops.dgl_sparse.softmax(input.c_sparse_matrix))\n+ return SparseMatrix(\n+ torch.ops.dgl_sparse.softmax(input.c_sparse_matrix, dim)\n+ )\n \n \n SparseMatrix.softmax = softmax\n", "issue": "[Sparse] Support column-wise softmax.\n## \ud83d\udd28Work Item\r\n\r\n**IMPORTANT:**\r\n* This template is only for dev team to track project progress. For feature request or bug report, please use the corresponding issue templates.\r\n* DO NOT create a new work item if the purpose is to fix an existing issue or feature request. 
We will directly use the issue in the project tracker.\r\n\r\nProject tracker: https://github.com/orgs/dmlc/projects/2\r\n\r\n## Description\r\n\r\n<!-- short description of the work item -->\r\n\r\n## Depending work items or issues\r\n\r\n<!-- what must be done before this -->\r\n\n", "before_files": [{"content": "\"\"\"Softmax op for SparseMatrix\"\"\"\n# pylint: disable=invalid-name, W0622\n\nimport torch\n\nfrom .sparse_matrix import SparseMatrix\n\n__all__ = [\"softmax\"]\n\n\ndef softmax(input: SparseMatrix) -> SparseMatrix:\n \"\"\"Applies row-wise softmax to the non-zero elements of the sparse matrix.\n\n Equivalently, applies softmax to the non-zero elements of the sparse\n matrix along the column (``dim=1``) dimension.\n\n If :attr:`input.val` takes shape ``(nnz, D)``, then the output matrix\n :attr:`output` and :attr:`output.val` take the same shape as :attr:`input`\n and :attr:`input.val`. :attr:`output.val[:, i]` is calculated based on\n :attr:`input.val[:, i]`.\n\n Parameters\n ----------\n input : SparseMatrix\n The input sparse matrix\n\n Returns\n -------\n SparseMatrix\n The output sparse matrix\n\n Examples\n --------\n\n Case1: matrix with values of shape (nnz)\n\n >>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])\n >>> nnz = len(row)\n >>> val = torch.arange(nnz).float()\n >>> A = dglsp.spmatrix(indices, val)\n >>> dglsp.softmax(A)\n SparseMatrix(indices=tensor([[0, 0, 1, 2],\n [1, 2, 2, 0]]),\n values=tensor([0.2689, 0.7311, 1.0000, 1.0000]),\n shape=(3, 3), nnz=4)\n\n Case2: matrix with values of shape (nnz, D)\n\n >>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])\n >>> val = torch.tensor([[0., 7.], [1., 3.], [2., 2.], [3., 1.]])\n >>> A = dglsp.spmatrix(indices, val)\n >>> dglsp.softmax(A)\n SparseMatrix(indices=tensor([[0, 0, 1, 2],\n [1, 2, 2, 0]]),\n values=tensor([[0.2689, 0.9820],\n [0.7311, 0.0180],\n [1.0000, 1.0000],\n [1.0000, 1.0000]]),\n shape=(3, 3), nnz=4, val_size=(2,))\n \"\"\"\n return SparseMatrix(torch.ops.dgl_sparse.softmax(input.c_sparse_matrix))\n\n\nSparseMatrix.softmax = softmax\n", "path": "python/dgl/sparse/softmax.py"}]}
1,422
863
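The DGL record above adds a `dim` argument to sparse softmax. Assuming the patched `dgl.sparse` API (imported as `dglsp`, as in the docstring), a small sketch contrasting the two modes on the matrix used in the record's examples:

```
import torch
import dgl.sparse as dglsp

indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])
val = torch.tensor([0., 1., 2., 3.])
A = dglsp.spmatrix(indices, val)

row_wise = dglsp.softmax(A)      # default dim=1: each row's non-zeros sum to 1
col_wise = dglsp.softmax(A, 0)   # added by the patch: each column's non-zeros sum to 1

print(row_wise.val)  # tensor([0.2689, 0.7311, 1.0000, 1.0000]) per the docstring
print(col_wise.val)  # tensor([1.0000, 0.2689, 0.7311, 1.0000]) per the docstring
```

Only the stored non-zeros take part in the normalisation, which is what distinguishes this from a dense `torch.softmax` over the full matrix.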
gh_patches_debug_23551
rasdani/github-patches
git_diff
getnikola__nikola-1468
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> github_deploy broken if no remote `gh-pages` branch If you just started out with Nikola and your remote doesn't have a `gh-pages` branch yet (which will be the case for most people), then `github_deploy` crashes with: ``` ERROR: github_deploy: Failed GitHub deployment — command ['git', 'pull', 'origin', 'gh-pages:gh-pages'] returned 1 ``` Since `git pull origin gh-pages:gh-pages` won't work if there's no remote `gh-pages` branch. This is a big problem, because creating and pushing the blank branch is possible but nontrivial, and is one of the main automation "bonuses" of having a `nikola github_deploy` in the first place. </issue> <code> [start of nikola/plugins/command/github_deploy.py] 1 # -*- coding: utf-8 -*- 2 3 # Copyright © 2014 Puneeth Chaganti and others. 4 5 # Permission is hereby granted, free of charge, to any 6 # person obtaining a copy of this software and associated 7 # documentation files (the "Software"), to deal in the 8 # Software without restriction, including without limitation 9 # the rights to use, copy, modify, merge, publish, 10 # distribute, sublicense, and/or sell copies of the 11 # Software, and to permit persons to whom the Software is 12 # furnished to do so, subject to the following conditions: 13 # 14 # The above copyright notice and this permission notice 15 # shall be included in all copies or substantial portions of 16 # the Software. 17 # 18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS 22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 27 from __future__ import print_function 28 import os 29 import shutil 30 import subprocess 31 import sys 32 from textwrap import dedent 33 34 from nikola.plugin_categories import Command 35 from nikola.plugins.command.check import real_scan_files 36 from nikola.utils import ask_yesno, get_logger 37 from nikola.__main__ import main 38 from nikola import __version__ 39 40 41 def uni_check_output(*args, **kwargs): 42 o = subprocess.check_output(*args, **kwargs) 43 return o.decode('utf-8') 44 45 46 class CommandGitHubDeploy(Command): 47 """ Deploy site to GitHub pages. """ 48 name = 'github_deploy' 49 50 doc_usage = '' 51 doc_purpose = 'deploy the site to GitHub pages' 52 doc_description = dedent( 53 """\ 54 This command can be used to deploy your site to GitHub pages. 55 It performs the following actions: 56 57 1. Ensure that your site is a git repository, and git is on the PATH. 58 2. Ensure that the output directory is not committed on the 59 source branch. 60 3. Check for changes, and prompt the user to continue, if required. 61 4. Build the site 62 5. Clean any files that are "unknown" to Nikola. 63 6. Create a deploy branch, if one doesn't exist. 64 7. Commit the output to this branch. (NOTE: Any untracked source 65 files, may get committed at this stage, on the wrong branch!) 66 8. Push and deploy! 
67 68 NOTE: This command needs your site to be a git repository, with a 69 master branch (or a different branch, configured using 70 GITHUB_SOURCE_BRANCH if you are pushing to user.github 71 .io/organization.github.io pages) containing the sources of your 72 site. You also, obviously, need to have `git` on your PATH, 73 and should be able to push to the repository specified as the remote 74 (origin, by default). 75 """ 76 ) 77 78 logger = None 79 80 _deploy_branch = '' 81 _source_branch = '' 82 _remote_name = '' 83 84 def _execute(self, command, args): 85 86 self.logger = get_logger( 87 CommandGitHubDeploy.name, self.site.loghandlers 88 ) 89 self._source_branch = self.site.config.get( 90 'GITHUB_SOURCE_BRANCH', 'master' 91 ) 92 self._deploy_branch = self.site.config.get( 93 'GITHUB_DEPLOY_BRANCH', 'gh-pages' 94 ) 95 self._remote_name = self.site.config.get( 96 'GITHUB_REMOTE_NAME', 'origin' 97 ) 98 99 self._ensure_git_repo() 100 101 self._exit_if_output_committed() 102 103 if not self._prompt_continue(): 104 return 105 106 build = main(['build']) 107 if build != 0: 108 self.logger.error('Build failed, not deploying to GitHub') 109 sys.exit(build) 110 111 only_on_output, _ = real_scan_files(self.site) 112 for f in only_on_output: 113 os.unlink(f) 114 115 self._checkout_deploy_branch() 116 117 self._copy_output() 118 119 self._commit_and_push() 120 121 return 122 123 def _commit_and_push(self): 124 """ Commit all the files and push. """ 125 126 deploy = self._deploy_branch 127 source = self._source_branch 128 remote = self._remote_name 129 130 source_commit = uni_check_output(['git', 'rev-parse', source]) 131 commit_message = ( 132 'Nikola auto commit.\n\n' 133 'Source commit: %s' 134 'Nikola version: %s' % (source_commit, __version__) 135 ) 136 137 commands = [ 138 ['git', 'pull', '--rebase=false', remote, '%s:%s' % (deploy, deploy)], 139 ['git', 'add', '-A'], 140 ['git', 'commit', '-m', commit_message], 141 ['git', 'push', '--force', remote, '%s:%s' % (deploy, deploy)], 142 ['git', 'checkout', source], 143 ] 144 145 for command in commands: 146 self.logger.info("==> {0}".format(command)) 147 try: 148 subprocess.check_call(command) 149 except subprocess.CalledProcessError as e: 150 self.logger.error( 151 'Failed GitHub deployment — command {0} ' 152 'returned {1}'.format(e.cmd, e.returncode) 153 ) 154 sys.exit(e.returncode) 155 156 def _copy_output(self): 157 """ Copy all output to the top level directory. """ 158 output_folder = self.site.config['OUTPUT_FOLDER'] 159 for each in os.listdir(output_folder): 160 if os.path.exists(each): 161 if os.path.isdir(each): 162 shutil.rmtree(each) 163 164 else: 165 os.unlink(each) 166 167 shutil.move(os.path.join(output_folder, each), '.') 168 169 def _checkout_deploy_branch(self): 170 """ Check out the deploy branch 171 172 Creates an orphan branch if not present. 
173 174 """ 175 176 deploy = self._deploy_branch 177 178 try: 179 subprocess.check_call( 180 [ 181 'git', 'show-ref', '--verify', '--quiet', 182 'refs/heads/%s' % deploy 183 ] 184 ) 185 except subprocess.CalledProcessError: 186 self._create_orphan_deploy_branch() 187 else: 188 subprocess.check_call(['git', 'checkout', deploy]) 189 190 def _create_orphan_deploy_branch(self): 191 """ Create an orphan deploy branch """ 192 193 result = subprocess.check_call( 194 ['git', 'checkout', '--orphan', self._deploy_branch] 195 ) 196 if result != 0: 197 self.logger.error('Failed to create a deploy branch') 198 sys.exit(1) 199 200 result = subprocess.check_call(['git', 'rm', '-rf', '.']) 201 if result != 0: 202 self.logger.error('Failed to create a deploy branch') 203 sys.exit(1) 204 205 with open('.gitignore', 'w') as f: 206 f.write('%s\n' % self.site.config['OUTPUT_FOLDER']) 207 f.write('%s\n' % self.site.config['CACHE_FOLDER']) 208 f.write('*.pyc\n') 209 f.write('*.db\n') 210 211 subprocess.check_call(['git', 'add', '.gitignore']) 212 subprocess.check_call(['git', 'commit', '-m', 'Add .gitignore']) 213 214 def _ensure_git_repo(self): 215 """ Ensure that the site is a git-repo. 216 217 Also make sure that a remote with the specified name exists. 218 219 """ 220 221 try: 222 remotes = uni_check_output(['git', 'remote']) 223 except subprocess.CalledProcessError as e: 224 self.logger.notice('github_deploy needs a git repository!') 225 sys.exit(e.returncode) 226 except OSError as e: 227 import errno 228 self.logger.error('Running git failed with {0}'.format(e)) 229 if e.errno == errno.ENOENT: 230 self.logger.notice('Is git on the PATH?') 231 sys.exit(1) 232 else: 233 if self._remote_name not in remotes: 234 self.logger.error( 235 'Need a remote called "%s" configured' % self._remote_name 236 ) 237 sys.exit(1) 238 239 def _exit_if_output_committed(self): 240 """ Exit if the output folder is committed on the source branch. """ 241 242 source = self._source_branch 243 subprocess.check_call(['git', 'checkout', source]) 244 245 output_folder = self.site.config['OUTPUT_FOLDER'] 246 output_log = uni_check_output( 247 ['git', 'ls-files', '--', output_folder] 248 ) 249 250 if len(output_log.strip()) > 0: 251 self.logger.error( 252 'Output folder is committed on the source branch. ' 253 'Cannot proceed until it is removed.' 254 ) 255 sys.exit(1) 256 257 def _prompt_continue(self): 258 """ Show uncommitted changes, and ask if user wants to continue. """ 259 260 changes = uni_check_output(['git', 'status', '--porcelain']) 261 if changes.strip(): 262 changes = uni_check_output(['git', 'status']).strip() 263 message = ( 264 "You have the following changes:\n%s\n\n" 265 "Anything not committed, and unknown to Nikola may be lost, " 266 "or committed onto the wrong branch. Do you wish to continue?" 267 ) % changes 268 proceed = ask_yesno(message, False) 269 else: 270 proceed = True 271 272 return proceed 273 [end of nikola/plugins/command/github_deploy.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nikola/plugins/command/github_deploy.py b/nikola/plugins/command/github_deploy.py --- a/nikola/plugins/command/github_deploy.py +++ b/nikola/plugins/command/github_deploy.py @@ -95,6 +95,9 @@ self._remote_name = self.site.config.get( 'GITHUB_REMOTE_NAME', 'origin' ) + self._pull_before_commit = self.site.config.get( + 'GITHUB_PULL_BEFORE_COMMIT', False + ) self._ensure_git_repo() @@ -135,13 +138,15 @@ ) commands = [ - ['git', 'pull', '--rebase=false', remote, '%s:%s' % (deploy, deploy)], ['git', 'add', '-A'], ['git', 'commit', '-m', commit_message], ['git', 'push', '--force', remote, '%s:%s' % (deploy, deploy)], ['git', 'checkout', source], ] + if self._pull_before_commit: + commands.insert(0, ['git', 'pull', '--rebase=false', remote, '%s:%s' % (deploy, deploy)]) + for command in commands: self.logger.info("==> {0}".format(command)) try:
{"golden_diff": "diff --git a/nikola/plugins/command/github_deploy.py b/nikola/plugins/command/github_deploy.py\n--- a/nikola/plugins/command/github_deploy.py\n+++ b/nikola/plugins/command/github_deploy.py\n@@ -95,6 +95,9 @@\n self._remote_name = self.site.config.get(\n 'GITHUB_REMOTE_NAME', 'origin'\n )\n+ self._pull_before_commit = self.site.config.get(\n+ 'GITHUB_PULL_BEFORE_COMMIT', False\n+ )\n \n self._ensure_git_repo()\n \n@@ -135,13 +138,15 @@\n )\n \n commands = [\n- ['git', 'pull', '--rebase=false', remote, '%s:%s' % (deploy, deploy)],\n ['git', 'add', '-A'],\n ['git', 'commit', '-m', commit_message],\n ['git', 'push', '--force', remote, '%s:%s' % (deploy, deploy)],\n ['git', 'checkout', source],\n ]\n \n+ if self._pull_before_commit:\n+ commands.insert(0, ['git', 'pull', '--rebase=false', remote, '%s:%s' % (deploy, deploy)])\n+\n for command in commands:\n self.logger.info(\"==> {0}\".format(command))\n try:\n", "issue": "github_deploy broken if no remote `gh-pages` branch\nIf you just started out with Nikola and your remote doesn't have a `gh-pages` branch yet (which will be the case for most people), then `github_deploy` crashes with:\n\n```\nERROR: github_deploy: Failed GitHub deployment \u2014 command ['git', 'pull', 'origin', 'gh-pages:gh-pages'] returned 1\n```\n\nSince `git pull origin gh-pages:gh-pages` won't work if there's no remote `gh-pages` branch. This is a big problem, because creating and pushing the blank branch is possible but nontrivial, and is one of the main automation \"bonuses\" of having a `nikola github_deploy` in the first place.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2014 Puneeth Chaganti and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import print_function\nimport os\nimport shutil\nimport subprocess\nimport sys\nfrom textwrap import dedent\n\nfrom nikola.plugin_categories import Command\nfrom nikola.plugins.command.check import real_scan_files\nfrom nikola.utils import ask_yesno, get_logger\nfrom nikola.__main__ import main\nfrom nikola import __version__\n\n\ndef uni_check_output(*args, **kwargs):\n o = subprocess.check_output(*args, **kwargs)\n return o.decode('utf-8')\n\n\nclass CommandGitHubDeploy(Command):\n \"\"\" Deploy site to GitHub pages. 
\"\"\"\n name = 'github_deploy'\n\n doc_usage = ''\n doc_purpose = 'deploy the site to GitHub pages'\n doc_description = dedent(\n \"\"\"\\\n This command can be used to deploy your site to GitHub pages.\n It performs the following actions:\n\n 1. Ensure that your site is a git repository, and git is on the PATH.\n 2. Ensure that the output directory is not committed on the\n source branch.\n 3. Check for changes, and prompt the user to continue, if required.\n 4. Build the site\n 5. Clean any files that are \"unknown\" to Nikola.\n 6. Create a deploy branch, if one doesn't exist.\n 7. Commit the output to this branch. (NOTE: Any untracked source\n files, may get committed at this stage, on the wrong branch!)\n 8. Push and deploy!\n\n NOTE: This command needs your site to be a git repository, with a\n master branch (or a different branch, configured using\n GITHUB_SOURCE_BRANCH if you are pushing to user.github\n .io/organization.github.io pages) containing the sources of your\n site. You also, obviously, need to have `git` on your PATH,\n and should be able to push to the repository specified as the remote\n (origin, by default).\n \"\"\"\n )\n\n logger = None\n\n _deploy_branch = ''\n _source_branch = ''\n _remote_name = ''\n\n def _execute(self, command, args):\n\n self.logger = get_logger(\n CommandGitHubDeploy.name, self.site.loghandlers\n )\n self._source_branch = self.site.config.get(\n 'GITHUB_SOURCE_BRANCH', 'master'\n )\n self._deploy_branch = self.site.config.get(\n 'GITHUB_DEPLOY_BRANCH', 'gh-pages'\n )\n self._remote_name = self.site.config.get(\n 'GITHUB_REMOTE_NAME', 'origin'\n )\n\n self._ensure_git_repo()\n\n self._exit_if_output_committed()\n\n if not self._prompt_continue():\n return\n\n build = main(['build'])\n if build != 0:\n self.logger.error('Build failed, not deploying to GitHub')\n sys.exit(build)\n\n only_on_output, _ = real_scan_files(self.site)\n for f in only_on_output:\n os.unlink(f)\n\n self._checkout_deploy_branch()\n\n self._copy_output()\n\n self._commit_and_push()\n\n return\n\n def _commit_and_push(self):\n \"\"\" Commit all the files and push. \"\"\"\n\n deploy = self._deploy_branch\n source = self._source_branch\n remote = self._remote_name\n\n source_commit = uni_check_output(['git', 'rev-parse', source])\n commit_message = (\n 'Nikola auto commit.\\n\\n'\n 'Source commit: %s'\n 'Nikola version: %s' % (source_commit, __version__)\n )\n\n commands = [\n ['git', 'pull', '--rebase=false', remote, '%s:%s' % (deploy, deploy)],\n ['git', 'add', '-A'],\n ['git', 'commit', '-m', commit_message],\n ['git', 'push', '--force', remote, '%s:%s' % (deploy, deploy)],\n ['git', 'checkout', source],\n ]\n\n for command in commands:\n self.logger.info(\"==> {0}\".format(command))\n try:\n subprocess.check_call(command)\n except subprocess.CalledProcessError as e:\n self.logger.error(\n 'Failed GitHub deployment \u2014 command {0} '\n 'returned {1}'.format(e.cmd, e.returncode)\n )\n sys.exit(e.returncode)\n\n def _copy_output(self):\n \"\"\" Copy all output to the top level directory. 
\"\"\"\n output_folder = self.site.config['OUTPUT_FOLDER']\n for each in os.listdir(output_folder):\n if os.path.exists(each):\n if os.path.isdir(each):\n shutil.rmtree(each)\n\n else:\n os.unlink(each)\n\n shutil.move(os.path.join(output_folder, each), '.')\n\n def _checkout_deploy_branch(self):\n \"\"\" Check out the deploy branch\n\n Creates an orphan branch if not present.\n\n \"\"\"\n\n deploy = self._deploy_branch\n\n try:\n subprocess.check_call(\n [\n 'git', 'show-ref', '--verify', '--quiet',\n 'refs/heads/%s' % deploy\n ]\n )\n except subprocess.CalledProcessError:\n self._create_orphan_deploy_branch()\n else:\n subprocess.check_call(['git', 'checkout', deploy])\n\n def _create_orphan_deploy_branch(self):\n \"\"\" Create an orphan deploy branch \"\"\"\n\n result = subprocess.check_call(\n ['git', 'checkout', '--orphan', self._deploy_branch]\n )\n if result != 0:\n self.logger.error('Failed to create a deploy branch')\n sys.exit(1)\n\n result = subprocess.check_call(['git', 'rm', '-rf', '.'])\n if result != 0:\n self.logger.error('Failed to create a deploy branch')\n sys.exit(1)\n\n with open('.gitignore', 'w') as f:\n f.write('%s\\n' % self.site.config['OUTPUT_FOLDER'])\n f.write('%s\\n' % self.site.config['CACHE_FOLDER'])\n f.write('*.pyc\\n')\n f.write('*.db\\n')\n\n subprocess.check_call(['git', 'add', '.gitignore'])\n subprocess.check_call(['git', 'commit', '-m', 'Add .gitignore'])\n\n def _ensure_git_repo(self):\n \"\"\" Ensure that the site is a git-repo.\n\n Also make sure that a remote with the specified name exists.\n\n \"\"\"\n\n try:\n remotes = uni_check_output(['git', 'remote'])\n except subprocess.CalledProcessError as e:\n self.logger.notice('github_deploy needs a git repository!')\n sys.exit(e.returncode)\n except OSError as e:\n import errno\n self.logger.error('Running git failed with {0}'.format(e))\n if e.errno == errno.ENOENT:\n self.logger.notice('Is git on the PATH?')\n sys.exit(1)\n else:\n if self._remote_name not in remotes:\n self.logger.error(\n 'Need a remote called \"%s\" configured' % self._remote_name\n )\n sys.exit(1)\n\n def _exit_if_output_committed(self):\n \"\"\" Exit if the output folder is committed on the source branch. \"\"\"\n\n source = self._source_branch\n subprocess.check_call(['git', 'checkout', source])\n\n output_folder = self.site.config['OUTPUT_FOLDER']\n output_log = uni_check_output(\n ['git', 'ls-files', '--', output_folder]\n )\n\n if len(output_log.strip()) > 0:\n self.logger.error(\n 'Output folder is committed on the source branch. '\n 'Cannot proceed until it is removed.'\n )\n sys.exit(1)\n\n def _prompt_continue(self):\n \"\"\" Show uncommitted changes, and ask if user wants to continue. \"\"\"\n\n changes = uni_check_output(['git', 'status', '--porcelain'])\n if changes.strip():\n changes = uni_check_output(['git', 'status']).strip()\n message = (\n \"You have the following changes:\\n%s\\n\\n\"\n \"Anything not committed, and unknown to Nikola may be lost, \"\n \"or committed onto the wrong branch. Do you wish to continue?\"\n ) % changes\n proceed = ask_yesno(message, False)\n else:\n proceed = True\n\n return proceed\n", "path": "nikola/plugins/command/github_deploy.py"}]}
3,444
284
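The Nikola record above resolves the missing-remote-branch crash by making the pre-commit `git pull` opt-in via a new `GITHUB_PULL_BEFORE_COMMIT` setting (default `False`). A hypothetical `conf.py` excerpt using the settings the command reads — the values are illustrative only:

```
# GitHub Pages deployment settings consumed by `nikola github_deploy`
GITHUB_SOURCE_BRANCH = "master"
GITHUB_DEPLOY_BRANCH = "gh-pages"
GITHUB_REMOTE_NAME = "origin"

# New opt-in flag from the patch above. Keep it False (the default) until the
# remote gh-pages branch actually exists; enabling it restores the old
# pull-then-push behaviour that failed on fresh repositories.
GITHUB_PULL_BEFORE_COMMIT = False
```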
gh_patches_debug_20857
rasdani/github-patches
git_diff
bridgecrewio__checkov-3127
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> baseline output can change resource order for each run If I generate a baseline file and I have then made some improvements to my Terraform code and I run the baseline again. What I am finding is that the order of the resources for each file can often change which then shows up as a diff against the prevous baseline file - when in reality nothing has change but the order of the resources in the findings array in the baseline output file I was wondering could the findings array just be sorted before being output? Then the resource order should be fixed and any actual diffs should be real changes to check_ids (which is sorted already) or new resources being added? e.g. this is a diff from two runs of generating a baseline file nothing has actually change just resources moved around in the array. ``` @@ -100,13 +100,12 @@ "file": "/main.tf", "findings": [ { - "resource": "aws_s3_bucket.canary_artifacts", + "resource": "aws_s3_bucket.backups", "check_ids": [ "CKV2_AWS_6", "CKV_AWS_144", "CKV_AWS_145", - "CKV_AWS_18", - "CKV_AWS_21" + "CKV_AWS_18" ] }, { @@ -119,12 +118,13 @@ ] }, { - "resource": "aws_s3_bucket.lambdas", + "resource": "aws_s3_bucket.canary_artifacts", "check_ids": [ "CKV2_AWS_6", "CKV_AWS_144", "CKV_AWS_145", - "CKV_AWS_18" + "CKV_AWS_18", + "CKV_AWS_21" ] }, { @@ -137,7 +137,7 @@ ] }, { - "resource": "aws_s3_bucket.backups", + "resource": "aws_s3_bucket.lambdas", "check_ids": [ "CKV2_AWS_6", "CKV_AWS_144", ``` </issue> <code> [start of checkov/common/output/baseline.py] 1 from __future__ import annotations 2 3 import json 4 from collections import defaultdict 5 from checkov.common.models.enums import CheckResult 6 from typing import Any, TYPE_CHECKING 7 8 if TYPE_CHECKING: 9 from checkov.common.output.record import Record 10 from checkov.common.output.report import Report 11 from checkov.common.typing import _BaselineFinding, _BaselineFailedChecks 12 13 14 class Baseline: 15 def __init__(self, output_skipped: bool = False) -> None: 16 self.path = "" 17 self.path_failed_checks_map: dict[str, list[_BaselineFinding]] = defaultdict(list) 18 self.failed_checks: list[_BaselineFailedChecks] = [] 19 self.output_skipped = output_skipped 20 21 def add_findings_from_report(self, report: Report) -> None: 22 for check in report.failed_checks: 23 try: 24 existing = next( 25 x for x in self.path_failed_checks_map[check.file_path] if x["resource"] == check.resource 26 ) 27 except StopIteration: 28 existing = {"resource": check.resource, "check_ids": []} 29 self.path_failed_checks_map[check.file_path].append(existing) 30 existing["check_ids"].append(check.check_id) 31 existing["check_ids"].sort() # Sort the check IDs to be nicer to the eye 32 33 def to_dict(self) -> dict[str, Any]: 34 """ 35 The output of this class needs to be very explicit, hence the following structure of the dict: 36 { 37 "failed_checks": [ 38 { 39 "file": "path/to/file", 40 "findings: [ 41 { 42 "resource": "aws_s3_bucket.this", 43 "check_ids": [ 44 "CKV_AWS_1", 45 "CKV_AWS_2", 46 "CKV_AWS_3" 47 ] 48 } 49 ] 50 } 51 ] 52 } 53 """ 54 failed_checks_list = [] 55 for file, findings in self.path_failed_checks_map.items(): 56 formatted_findings = [] 57 for finding in findings: 58 formatted_findings.append({"resource": finding["resource"], "check_ids": finding["check_ids"]}) 59 failed_checks_list.append({"file": file, "findings": formatted_findings}) 60 61 resp = {"failed_checks": failed_checks_list} 62 return resp 63 64 
def compare_and_reduce_reports(self, scan_reports: list[Report]) -> None: 65 for scan_report in scan_reports: 66 scan_report.passed_checks = [ 67 check for check in scan_report.passed_checks if self._is_check_in_baseline(check) 68 ] 69 scan_report.skipped_checks = [ 70 check for check in scan_report.skipped_checks if self._is_check_in_baseline(check) 71 ] 72 if self.output_skipped: 73 for check in scan_report.failed_checks: 74 if self._is_check_in_baseline(check): 75 check.check_result["suppress_comment"] = "baseline-skipped" 76 check.check_result["result"] = CheckResult.SKIPPED 77 scan_report.skipped_checks.append(check) 78 scan_report.failed_checks = [ 79 check for check in scan_report.failed_checks if not self._is_check_in_baseline(check) 80 ] 81 82 def _is_check_in_baseline(self, check: Record) -> bool: 83 failed_check_id = check.check_id 84 failed_check_resource = check.resource 85 for baseline_failed_check in self.failed_checks: 86 for finding in baseline_failed_check["findings"]: 87 if finding["resource"] == failed_check_resource and failed_check_id in finding["check_ids"]: 88 return True 89 return False 90 91 def from_json(self, file_path: str) -> None: 92 self.path = file_path 93 with open(file_path, "r") as f: 94 baseline_raw = json.load(f) 95 self.failed_checks = baseline_raw.get("failed_checks", {}) 96 [end of checkov/common/output/baseline.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/common/output/baseline.py b/checkov/common/output/baseline.py --- a/checkov/common/output/baseline.py +++ b/checkov/common/output/baseline.py @@ -2,6 +2,8 @@ import json from collections import defaultdict +from operator import itemgetter + from checkov.common.models.enums import CheckResult from typing import Any, TYPE_CHECKING @@ -56,9 +58,9 @@ formatted_findings = [] for finding in findings: formatted_findings.append({"resource": finding["resource"], "check_ids": finding["check_ids"]}) - failed_checks_list.append({"file": file, "findings": formatted_findings}) + failed_checks_list.append({"file": file, "findings": sorted(formatted_findings, key=itemgetter("resource"))}) - resp = {"failed_checks": failed_checks_list} + resp = {"failed_checks": sorted(failed_checks_list, key=itemgetter("file"))} return resp def compare_and_reduce_reports(self, scan_reports: list[Report]) -> None:
{"golden_diff": "diff --git a/checkov/common/output/baseline.py b/checkov/common/output/baseline.py\n--- a/checkov/common/output/baseline.py\n+++ b/checkov/common/output/baseline.py\n@@ -2,6 +2,8 @@\n \n import json\n from collections import defaultdict\n+from operator import itemgetter\n+\n from checkov.common.models.enums import CheckResult\n from typing import Any, TYPE_CHECKING\n \n@@ -56,9 +58,9 @@\n formatted_findings = []\n for finding in findings:\n formatted_findings.append({\"resource\": finding[\"resource\"], \"check_ids\": finding[\"check_ids\"]})\n- failed_checks_list.append({\"file\": file, \"findings\": formatted_findings})\n+ failed_checks_list.append({\"file\": file, \"findings\": sorted(formatted_findings, key=itemgetter(\"resource\"))})\n \n- resp = {\"failed_checks\": failed_checks_list}\n+ resp = {\"failed_checks\": sorted(failed_checks_list, key=itemgetter(\"file\"))}\n return resp\n \n def compare_and_reduce_reports(self, scan_reports: list[Report]) -> None:\n", "issue": "baseline output can change resource order for each run\nIf I generate a baseline file and I have then made some improvements to my Terraform code and I run the baseline again. What I am finding is that the order of the resources for each file can often change which then shows up as a diff against the prevous baseline file - when in reality nothing has change but the order of the resources in the findings array in the baseline output file \r\n\r\nI was wondering could the findings array just be sorted before being output? Then the resource order should be fixed and any actual diffs should be real changes to check_ids (which is sorted already) or new resources being added?\r\n\r\ne.g. this is a diff from two runs of generating a baseline file nothing has actually change just resources moved around in the array.\r\n\r\n```\r\n@@ -100,13 +100,12 @@\r\n \"file\": \"/main.tf\",\r\n \"findings\": [\r\n {\r\n- \"resource\": \"aws_s3_bucket.canary_artifacts\",\r\n+ \"resource\": \"aws_s3_bucket.backups\",\r\n \"check_ids\": [\r\n \"CKV2_AWS_6\",\r\n \"CKV_AWS_144\",\r\n \"CKV_AWS_145\",\r\n- \"CKV_AWS_18\",\r\n- \"CKV_AWS_21\"\r\n+ \"CKV_AWS_18\"\r\n ]\r\n },\r\n {\r\n@@ -119,12 +118,13 @@\r\n ]\r\n },\r\n {\r\n- \"resource\": \"aws_s3_bucket.lambdas\",\r\n+ \"resource\": \"aws_s3_bucket.canary_artifacts\",\r\n \"check_ids\": [\r\n \"CKV2_AWS_6\",\r\n \"CKV_AWS_144\",\r\n \"CKV_AWS_145\",\r\n- \"CKV_AWS_18\"\r\n+ \"CKV_AWS_18\",\r\n+ \"CKV_AWS_21\"\r\n ]\r\n },\r\n {\r\n@@ -137,7 +137,7 @@\r\n ]\r\n },\r\n {\r\n- \"resource\": \"aws_s3_bucket.backups\",\r\n+ \"resource\": \"aws_s3_bucket.lambdas\",\r\n \"check_ids\": [\r\n \"CKV2_AWS_6\",\r\n \"CKV_AWS_144\",\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nimport json\nfrom collections import defaultdict\nfrom checkov.common.models.enums import CheckResult\nfrom typing import Any, TYPE_CHECKING\n\nif TYPE_CHECKING:\n from checkov.common.output.record import Record\n from checkov.common.output.report import Report\n from checkov.common.typing import _BaselineFinding, _BaselineFailedChecks\n\n\nclass Baseline:\n def __init__(self, output_skipped: bool = False) -> None:\n self.path = \"\"\n self.path_failed_checks_map: dict[str, list[_BaselineFinding]] = defaultdict(list)\n self.failed_checks: list[_BaselineFailedChecks] = []\n self.output_skipped = output_skipped\n\n def add_findings_from_report(self, report: Report) -> None:\n for check in report.failed_checks:\n try:\n existing = next(\n x for x in 
self.path_failed_checks_map[check.file_path] if x[\"resource\"] == check.resource\n )\n except StopIteration:\n existing = {\"resource\": check.resource, \"check_ids\": []}\n self.path_failed_checks_map[check.file_path].append(existing)\n existing[\"check_ids\"].append(check.check_id)\n existing[\"check_ids\"].sort() # Sort the check IDs to be nicer to the eye\n\n def to_dict(self) -> dict[str, Any]:\n \"\"\"\n The output of this class needs to be very explicit, hence the following structure of the dict:\n {\n \"failed_checks\": [\n {\n \"file\": \"path/to/file\",\n \"findings: [\n {\n \"resource\": \"aws_s3_bucket.this\",\n \"check_ids\": [\n \"CKV_AWS_1\",\n \"CKV_AWS_2\",\n \"CKV_AWS_3\"\n ]\n }\n ]\n }\n ]\n }\n \"\"\"\n failed_checks_list = []\n for file, findings in self.path_failed_checks_map.items():\n formatted_findings = []\n for finding in findings:\n formatted_findings.append({\"resource\": finding[\"resource\"], \"check_ids\": finding[\"check_ids\"]})\n failed_checks_list.append({\"file\": file, \"findings\": formatted_findings})\n\n resp = {\"failed_checks\": failed_checks_list}\n return resp\n\n def compare_and_reduce_reports(self, scan_reports: list[Report]) -> None:\n for scan_report in scan_reports:\n scan_report.passed_checks = [\n check for check in scan_report.passed_checks if self._is_check_in_baseline(check)\n ]\n scan_report.skipped_checks = [\n check for check in scan_report.skipped_checks if self._is_check_in_baseline(check)\n ]\n if self.output_skipped:\n for check in scan_report.failed_checks:\n if self._is_check_in_baseline(check):\n check.check_result[\"suppress_comment\"] = \"baseline-skipped\"\n check.check_result[\"result\"] = CheckResult.SKIPPED\n scan_report.skipped_checks.append(check)\n scan_report.failed_checks = [\n check for check in scan_report.failed_checks if not self._is_check_in_baseline(check)\n ]\n\n def _is_check_in_baseline(self, check: Record) -> bool:\n failed_check_id = check.check_id\n failed_check_resource = check.resource\n for baseline_failed_check in self.failed_checks:\n for finding in baseline_failed_check[\"findings\"]:\n if finding[\"resource\"] == failed_check_resource and failed_check_id in finding[\"check_ids\"]:\n return True\n return False\n\n def from_json(self, file_path: str) -> None:\n self.path = file_path\n with open(file_path, \"r\") as f:\n baseline_raw = json.load(f)\n self.failed_checks = baseline_raw.get(\"failed_checks\", {})\n", "path": "checkov/common/output/baseline.py"}]}
num_tokens_prompt: 2023
num_tokens_diff: 235
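The patch in the record above makes checkov's baseline output deterministic by sorting the per-file findings and the file list before serialisation. As a rough standalone illustration of the same idea (the function name and sample data below are invented; only the `operator.itemgetter`-based sorting mirrors the actual diff):

```python
# Minimal sketch: make a nested findings structure deterministic before
# serialising it, by sorting on stable keys. Sample data is made up; only
# the itemgetter-based sorting mirrors the fix in the record above.
import json
from operator import itemgetter

def to_stable_baseline(path_findings):
    failed_checks = []
    for file_path, findings in path_findings.items():
        formatted = [
            {"resource": f["resource"], "check_ids": sorted(f["check_ids"])}
            for f in findings
        ]
        failed_checks.append(
            {"file": file_path, "findings": sorted(formatted, key=itemgetter("resource"))}
        )
    return {"failed_checks": sorted(failed_checks, key=itemgetter("file"))}

if __name__ == "__main__":
    data = {
        "/main.tf": [
            {"resource": "aws_s3_bucket.lambdas", "check_ids": ["CKV_AWS_18", "CKV2_AWS_6"]},
            {"resource": "aws_s3_bucket.backups", "check_ids": ["CKV_AWS_21"]},
        ]
    }
    print(json.dumps(to_stable_baseline(data), indent=2))
```

With every list sorted on a stable key, re-generating the baseline after unrelated code changes produces no spurious diffs.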
problem_id: gh_patches_debug_34812
source: rasdani/github-patches
task_type: git_diff
in_source_id: wagtail__wagtail-10961
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Avoid purging Revisions in use by third-party packages I originally raised this as part of some rework on the `purge_revisions` management command in https://github.com/wagtail/wagtail/pull/10619#pullrequestreview-1528373346. We considered it out of scope to fix the issue in that PR since Wagtail has always worked like this. Hence I'm raising it again separately in this issue. ### Is your proposal related to a problem? Wagtail provides a `purge_revisions` management command to delete stale `Revision` objects from the database. This command is aware of revisions that are still in use by Wagtail's workflow feature and avoids deleting those revisions ([link to source](https://github.com/wagtail/wagtail/blob/3d0282573d0d23f9eabe2fb6427e58b01f79ffe0/wagtail/management/commands/purge_revisions.py#L61-L66)). However, it is somewhat common for third-party packages to rely on a specific revision. [wagtail-ab-testing](https://github.com/wagtail-nest/wagtail-ab-testing/blob/d04bd0032c4335a839c781851b12fe16037950de/wagtail_ab_testing/models.py#L76) is a real world example of this. It maintains a ForeignKey to a specific revision to track an AB experiment. If `purge_revisions` were to remove a revision in use by wagtail-ab-testing, experiment data loss would occur. Data loss can be prevented by specifying the ForeignKey with `on_delete=models.PROTECT` but `purge_revisions` wouldn't be able to handle this and crash with a `ProtectedError`. This means third-party packages have no recourse: either they allow the revisions they depend on to be deleted or they mark their relations as protected and break Wagtails `purge_revisions` command. ### Describe the solution you'd like I propose `purge_revisions` gracefully recovers from `ProtectedError` when trying to delete a revision. I also propose we document that relations to a `Revision` should be marked with `on_delete=models.PROTECT` if data loss is not desirable. I also feel like the `purge_revisions` command should report the number of revisions that it couldn't delete because they were protected. ### Describe alternatives you've considered An alternative mechanism I considered would be an API that allows packages to inform Wagtail which revisions they still use. The `purge_revisions` command can take this information into account and avoid revisions still in use. This is a more complex solution with no apparent benefit over the solution described above. Avoid purging Revisions in use by third-party packages I originally raised this as part of some rework on the `purge_revisions` management command in https://github.com/wagtail/wagtail/pull/10619#pullrequestreview-1528373346. We considered it out of scope to fix the issue in that PR since Wagtail has always worked like this. Hence I'm raising it again separately in this issue. ### Is your proposal related to a problem? Wagtail provides a `purge_revisions` management command to delete stale `Revision` objects from the database. This command is aware of revisions that are still in use by Wagtail's workflow feature and avoids deleting those revisions ([link to source](https://github.com/wagtail/wagtail/blob/3d0282573d0d23f9eabe2fb6427e58b01f79ffe0/wagtail/management/commands/purge_revisions.py#L61-L66)). However, it is somewhat common for third-party packages to rely on a specific revision. 
[wagtail-ab-testing](https://github.com/wagtail-nest/wagtail-ab-testing/blob/d04bd0032c4335a839c781851b12fe16037950de/wagtail_ab_testing/models.py#L76) is a real world example of this. It maintains a ForeignKey to a specific revision to track an AB experiment. If `purge_revisions` were to remove a revision in use by wagtail-ab-testing, experiment data loss would occur. Data loss can be prevented by specifying the ForeignKey with `on_delete=models.PROTECT` but `purge_revisions` wouldn't be able to handle this and crash with a `ProtectedError`. This means third-party packages have no recourse: either they allow the revisions they depend on to be deleted or they mark their relations as protected and break Wagtails `purge_revisions` command. ### Describe the solution you'd like I propose `purge_revisions` gracefully recovers from `ProtectedError` when trying to delete a revision. I also propose we document that relations to a `Revision` should be marked with `on_delete=models.PROTECT` if data loss is not desirable. I also feel like the `purge_revisions` command should report the number of revisions that it couldn't delete because they were protected. ### Describe alternatives you've considered An alternative mechanism I considered would be an API that allows packages to inform Wagtail which revisions they still use. The `purge_revisions` command can take this information into account and avoid revisions still in use. This is a more complex solution with no apparent benefit over the solution described above. </issue> <code> [start of wagtail/management/commands/purge_revisions.py] 1 from django.conf import settings 2 from django.core.management.base import BaseCommand 3 from django.db.models import Q 4 from django.utils import timezone 5 6 from wagtail.models import Revision, WorkflowState 7 8 9 class Command(BaseCommand): 10 help = "Delete revisions which are not the latest revision, published or scheduled to be published, or in moderation" 11 12 def add_arguments(self, parser): 13 parser.add_argument( 14 "--days", 15 type=int, 16 help="Only delete revisions older than this number of days", 17 ) 18 parser.add_argument( 19 "--pages", 20 action="store_true", 21 help="Only delete revisions of page models", 22 ) 23 parser.add_argument( 24 "--non-pages", 25 action="store_true", 26 help="Only delete revisions of non-page models", 27 ) 28 29 def handle(self, *args, **options): 30 days = options.get("days") 31 pages = options.get("pages") 32 non_pages = options.get("non_pages") 33 34 revisions_deleted = purge_revisions(days=days, pages=pages, non_pages=non_pages) 35 36 if revisions_deleted: 37 self.stdout.write( 38 self.style.SUCCESS( 39 "Successfully deleted %s revisions" % revisions_deleted 40 ) 41 ) 42 else: 43 self.stdout.write("No revisions deleted") 44 45 46 def purge_revisions(days=None, pages=True, non_pages=True): 47 if pages == non_pages: 48 # If both are True or both are False, purge revisions of pages and non-pages 49 objects = Revision.objects.all() 50 elif pages: 51 objects = Revision.objects.page_revisions() 52 elif non_pages: 53 objects = Revision.objects.not_page_revisions() 54 55 # exclude revisions which have been submitted for moderation in the old system 56 # RemovedInWagtail60Warning 57 # Remove this when the deprecation period for the legacy 58 # moderation system ends. 
59 purgeable_revisions = objects.exclude(submitted_for_moderation=True).exclude( 60 # and exclude revisions with an approved_go_live_at date 61 approved_go_live_at__isnull=False 62 ) 63 64 if getattr(settings, "WAGTAIL_WORKFLOW_ENABLED", True): 65 purgeable_revisions = purgeable_revisions.exclude( 66 # and exclude revisions linked to an in progress or needs changes workflow state 67 Q(task_states__workflow_state__status=WorkflowState.STATUS_IN_PROGRESS) 68 | Q(task_states__workflow_state__status=WorkflowState.STATUS_NEEDS_CHANGES) 69 ) 70 71 if days: 72 purgeable_until = timezone.now() - timezone.timedelta(days=days) 73 # only include revisions which were created before the cut off date 74 purgeable_revisions = purgeable_revisions.filter(created_at__lt=purgeable_until) 75 76 deleted_revisions_count = 0 77 78 for revision in purgeable_revisions.iterator(): 79 # don't delete the latest revision 80 if not revision.is_latest_revision(): 81 revision.delete() 82 deleted_revisions_count += 1 83 84 return deleted_revisions_count 85 [end of wagtail/management/commands/purge_revisions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/management/commands/purge_revisions.py b/wagtail/management/commands/purge_revisions.py --- a/wagtail/management/commands/purge_revisions.py +++ b/wagtail/management/commands/purge_revisions.py @@ -1,6 +1,7 @@ from django.conf import settings from django.core.management.base import BaseCommand from django.db.models import Q +from django.db.models.deletion import ProtectedError from django.utils import timezone from wagtail.models import Revision, WorkflowState @@ -31,7 +32,9 @@ pages = options.get("pages") non_pages = options.get("non_pages") - revisions_deleted = purge_revisions(days=days, pages=pages, non_pages=non_pages) + revisions_deleted, protected_error_count = purge_revisions( + days=days, pages=pages, non_pages=non_pages + ) if revisions_deleted: self.stdout.write( @@ -39,6 +42,12 @@ "Successfully deleted %s revisions" % revisions_deleted ) ) + self.stdout.write( + self.style.SUCCESS( + "Ignored %s revisions because one or more protected relations exist that prevent deletion." + % protected_error_count + ) + ) else: self.stdout.write("No revisions deleted") @@ -74,11 +83,15 @@ purgeable_revisions = purgeable_revisions.filter(created_at__lt=purgeable_until) deleted_revisions_count = 0 + protected_error_count = 0 for revision in purgeable_revisions.iterator(): # don't delete the latest revision if not revision.is_latest_revision(): - revision.delete() - deleted_revisions_count += 1 + try: + revision.delete() + deleted_revisions_count += 1 + except ProtectedError: + protected_error_count += 1 - return deleted_revisions_count + return deleted_revisions_count, protected_error_count
{"golden_diff": "diff --git a/wagtail/management/commands/purge_revisions.py b/wagtail/management/commands/purge_revisions.py\n--- a/wagtail/management/commands/purge_revisions.py\n+++ b/wagtail/management/commands/purge_revisions.py\n@@ -1,6 +1,7 @@\n from django.conf import settings\n from django.core.management.base import BaseCommand\n from django.db.models import Q\n+from django.db.models.deletion import ProtectedError\n from django.utils import timezone\n \n from wagtail.models import Revision, WorkflowState\n@@ -31,7 +32,9 @@\n pages = options.get(\"pages\")\n non_pages = options.get(\"non_pages\")\n \n- revisions_deleted = purge_revisions(days=days, pages=pages, non_pages=non_pages)\n+ revisions_deleted, protected_error_count = purge_revisions(\n+ days=days, pages=pages, non_pages=non_pages\n+ )\n \n if revisions_deleted:\n self.stdout.write(\n@@ -39,6 +42,12 @@\n \"Successfully deleted %s revisions\" % revisions_deleted\n )\n )\n+ self.stdout.write(\n+ self.style.SUCCESS(\n+ \"Ignored %s revisions because one or more protected relations exist that prevent deletion.\"\n+ % protected_error_count\n+ )\n+ )\n else:\n self.stdout.write(\"No revisions deleted\")\n \n@@ -74,11 +83,15 @@\n purgeable_revisions = purgeable_revisions.filter(created_at__lt=purgeable_until)\n \n deleted_revisions_count = 0\n+ protected_error_count = 0\n \n for revision in purgeable_revisions.iterator():\n # don't delete the latest revision\n if not revision.is_latest_revision():\n- revision.delete()\n- deleted_revisions_count += 1\n+ try:\n+ revision.delete()\n+ deleted_revisions_count += 1\n+ except ProtectedError:\n+ protected_error_count += 1\n \n- return deleted_revisions_count\n+ return deleted_revisions_count, protected_error_count\n", "issue": "Avoid purging Revisions in use by third-party packages\nI originally raised this as part of some rework on the `purge_revisions` management command in https://github.com/wagtail/wagtail/pull/10619#pullrequestreview-1528373346. We considered it out of scope to fix the issue in that PR since Wagtail has always worked like this. Hence I'm raising it again separately in this issue.\r\n\r\n### Is your proposal related to a problem?\r\n\r\nWagtail provides a `purge_revisions` management command to delete stale `Revision` objects from the database. This command is aware of revisions that are still in use by Wagtail's workflow feature and avoids deleting those revisions ([link to source](https://github.com/wagtail/wagtail/blob/3d0282573d0d23f9eabe2fb6427e58b01f79ffe0/wagtail/management/commands/purge_revisions.py#L61-L66)).\r\n\r\nHowever, it is somewhat common for third-party packages to rely on a specific revision. [wagtail-ab-testing](https://github.com/wagtail-nest/wagtail-ab-testing/blob/d04bd0032c4335a839c781851b12fe16037950de/wagtail_ab_testing/models.py#L76) is a real world example of this. It maintains a ForeignKey to a specific revision to track an AB experiment. If `purge_revisions` were to remove a revision in use by wagtail-ab-testing, experiment data loss would occur.\r\n\r\nData loss can be prevented by specifying the ForeignKey with `on_delete=models.PROTECT` but `purge_revisions` wouldn't be able to handle this and crash with a `ProtectedError`. 
This means third-party packages have no recourse: either they allow the revisions they depend on to be deleted or they mark their relations as protected and break Wagtails `purge_revisions` command.\r\n\r\n### Describe the solution you'd like\r\n\r\nI propose `purge_revisions` gracefully recovers from `ProtectedError` when trying to delete a revision. I also propose we document that relations to a `Revision` should be marked with `on_delete=models.PROTECT` if data loss is not desirable.\r\n\r\nI also feel like the `purge_revisions` command should report the number of revisions that it couldn't delete because they were protected.\r\n\r\n### Describe alternatives you've considered\r\n\r\nAn alternative mechanism I considered would be an API that allows packages to inform Wagtail which revisions they still use. The `purge_revisions` command can take this information into account and avoid revisions still in use.\r\n\r\nThis is a more complex solution with no apparent benefit over the solution described above.\nAvoid purging Revisions in use by third-party packages\nI originally raised this as part of some rework on the `purge_revisions` management command in https://github.com/wagtail/wagtail/pull/10619#pullrequestreview-1528373346. We considered it out of scope to fix the issue in that PR since Wagtail has always worked like this. Hence I'm raising it again separately in this issue.\r\n\r\n### Is your proposal related to a problem?\r\n\r\nWagtail provides a `purge_revisions` management command to delete stale `Revision` objects from the database. This command is aware of revisions that are still in use by Wagtail's workflow feature and avoids deleting those revisions ([link to source](https://github.com/wagtail/wagtail/blob/3d0282573d0d23f9eabe2fb6427e58b01f79ffe0/wagtail/management/commands/purge_revisions.py#L61-L66)).\r\n\r\nHowever, it is somewhat common for third-party packages to rely on a specific revision. [wagtail-ab-testing](https://github.com/wagtail-nest/wagtail-ab-testing/blob/d04bd0032c4335a839c781851b12fe16037950de/wagtail_ab_testing/models.py#L76) is a real world example of this. It maintains a ForeignKey to a specific revision to track an AB experiment. If `purge_revisions` were to remove a revision in use by wagtail-ab-testing, experiment data loss would occur.\r\n\r\nData loss can be prevented by specifying the ForeignKey with `on_delete=models.PROTECT` but `purge_revisions` wouldn't be able to handle this and crash with a `ProtectedError`. This means third-party packages have no recourse: either they allow the revisions they depend on to be deleted or they mark their relations as protected and break Wagtails `purge_revisions` command.\r\n\r\n### Describe the solution you'd like\r\n\r\nI propose `purge_revisions` gracefully recovers from `ProtectedError` when trying to delete a revision. I also propose we document that relations to a `Revision` should be marked with `on_delete=models.PROTECT` if data loss is not desirable.\r\n\r\nI also feel like the `purge_revisions` command should report the number of revisions that it couldn't delete because they were protected.\r\n\r\n### Describe alternatives you've considered\r\n\r\nAn alternative mechanism I considered would be an API that allows packages to inform Wagtail which revisions they still use. 
The `purge_revisions` command can take this information into account and avoid revisions still in use.\r\n\r\nThis is a more complex solution with no apparent benefit over the solution described above.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.db.models import Q\nfrom django.utils import timezone\n\nfrom wagtail.models import Revision, WorkflowState\n\n\nclass Command(BaseCommand):\n help = \"Delete revisions which are not the latest revision, published or scheduled to be published, or in moderation\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--days\",\n type=int,\n help=\"Only delete revisions older than this number of days\",\n )\n parser.add_argument(\n \"--pages\",\n action=\"store_true\",\n help=\"Only delete revisions of page models\",\n )\n parser.add_argument(\n \"--non-pages\",\n action=\"store_true\",\n help=\"Only delete revisions of non-page models\",\n )\n\n def handle(self, *args, **options):\n days = options.get(\"days\")\n pages = options.get(\"pages\")\n non_pages = options.get(\"non_pages\")\n\n revisions_deleted = purge_revisions(days=days, pages=pages, non_pages=non_pages)\n\n if revisions_deleted:\n self.stdout.write(\n self.style.SUCCESS(\n \"Successfully deleted %s revisions\" % revisions_deleted\n )\n )\n else:\n self.stdout.write(\"No revisions deleted\")\n\n\ndef purge_revisions(days=None, pages=True, non_pages=True):\n if pages == non_pages:\n # If both are True or both are False, purge revisions of pages and non-pages\n objects = Revision.objects.all()\n elif pages:\n objects = Revision.objects.page_revisions()\n elif non_pages:\n objects = Revision.objects.not_page_revisions()\n\n # exclude revisions which have been submitted for moderation in the old system\n # RemovedInWagtail60Warning\n # Remove this when the deprecation period for the legacy\n # moderation system ends.\n purgeable_revisions = objects.exclude(submitted_for_moderation=True).exclude(\n # and exclude revisions with an approved_go_live_at date\n approved_go_live_at__isnull=False\n )\n\n if getattr(settings, \"WAGTAIL_WORKFLOW_ENABLED\", True):\n purgeable_revisions = purgeable_revisions.exclude(\n # and exclude revisions linked to an in progress or needs changes workflow state\n Q(task_states__workflow_state__status=WorkflowState.STATUS_IN_PROGRESS)\n | Q(task_states__workflow_state__status=WorkflowState.STATUS_NEEDS_CHANGES)\n )\n\n if days:\n purgeable_until = timezone.now() - timezone.timedelta(days=days)\n # only include revisions which were created before the cut off date\n purgeable_revisions = purgeable_revisions.filter(created_at__lt=purgeable_until)\n\n deleted_revisions_count = 0\n\n for revision in purgeable_revisions.iterator():\n # don't delete the latest revision\n if not revision.is_latest_revision():\n revision.delete()\n deleted_revisions_count += 1\n\n return deleted_revisions_count\n", "path": "wagtail/management/commands/purge_revisions.py"}]}
num_tokens_prompt: 2531
num_tokens_diff: 450
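The wagtail patch above lets `purge_revisions` skip revisions that other models protect with `on_delete=models.PROTECT`, counting them instead of crashing. A minimal sketch of that pattern, assuming a configured Django environment — the helper name and the `(deleted, protected)` return shape are illustrative, not Wagtail's API:

```python
# Sketch of deleting objects one-by-one while tolerating protected relations.
# `queryset` stands in for any Django queryset; the (deleted, protected)
# return shape is just for illustration.
from django.db.models.deletion import ProtectedError

def delete_ignoring_protected(queryset):
    deleted, protected = 0, 0
    for obj in queryset.iterator():
        try:
            obj.delete()
            deleted += 1
        except ProtectedError:
            # Another model points at this row with on_delete=PROTECT;
            # leave it in place instead of aborting the whole run.
            protected += 1
    return deleted, protected
```

Catching `ProtectedError` per object keeps the cleanup best-effort, so third-party packages can safely mark their revision foreign keys as protected.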
problem_id: gh_patches_debug_10241
source: rasdani/github-patches
task_type: git_diff
in_source_id: rootpy__rootpy-748
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error when using root_open: 'TDirectory' object has no attribute 'func' As above: `AttributeError: 'TDirectory' object has no attribute 'func'` </issue> <code> [start of rootpy/ROOT.py] 1 # Copyright 2012 the rootpy developers 2 # distributed under the terms of the GNU General Public License 3 """ 4 :py:mod:`rootpy.ROOT` 5 ===================== 6 7 This module is intended to be a drop-in replacement for ordinary 8 PyROOT imports by mimicking PyROOT's interface. If you find a case where it is 9 not, please report an issue to the rootpy developers. 10 11 Both ROOT and rootpy classes can be accessed in a harmonized way through this 12 module. This means you can take advantage of rootpy classes automatically by 13 replacing ``import ROOT`` with ``import rootpy.ROOT as ROOT`` or 14 ``from rootpy import ROOT`` in your code, while maintaining backward 15 compatibility with existing use of ROOT's classes. 16 17 ROOT classes are automatically "asrootpy'd" *after* the constructor in ROOT has 18 been called: 19 20 .. sourcecode:: python 21 22 >>> import rootpy.ROOT as ROOT 23 >>> h = ROOT.TH1F('name', 'title', 10, 0, 1) 24 >>> h 25 Hist('name') 26 >>> h.TYPE 27 'F' 28 29 Also access rootpy classes under this same module without needing to remember 30 where to import them from in rootpy: 31 32 .. sourcecode:: python 33 34 >>> import rootpy.ROOT as ROOT 35 >>> h = ROOT.Hist(10, 0, 1, name='name', type='F') 36 >>> h 37 Hist('name') 38 >>> h.TYPE 39 'F' 40 41 Plain old ROOT can still be accessed through the ``R`` property: 42 43 .. sourcecode:: python 44 45 >>> from rootpy import ROOT 46 >>> ROOT.R.TFile 47 <class 'ROOT.TFile'> 48 49 """ 50 from __future__ import absolute_import 51 52 from copy import copy 53 54 import ROOT 55 56 from . import asrootpy, lookup_rootpy, ROOT_VERSION 57 from . import QROOT, stl 58 from .utils.module_facade import Facade 59 60 __all__ = [] 61 62 63 def proxy_global(name, no_expand_macro=False): 64 """ 65 Used to automatically asrootpy ROOT's thread local variables 66 """ 67 if no_expand_macro: # pragma: no cover 68 # handle older ROOT versions without _ExpandMacroFunction wrapping 69 @property 70 def gSomething_no_func(self): 71 glob = self(getattr(ROOT, name)) 72 # create a fake func() that just returns self 73 def func(): 74 return glob 75 glob.func = func 76 return glob 77 return gSomething_no_func 78 79 @property 80 def gSomething(self): 81 glob = getattr(ROOT, name) 82 orig_func = glob.func 83 84 def asrootpy_izing_func(): 85 return self(orig_func()) 86 87 # new_glob = copy(glob) 88 new_glob = glob.__class__.__new__(glob.__class__) 89 new_glob.func = asrootpy_izing_func 90 # Memoize 91 setattr(type(self), name, new_glob) 92 return new_glob 93 return gSomething 94 95 96 @Facade(__name__, expose_internal=False) 97 class Module(object): 98 99 __version__ = ROOT_VERSION 100 101 def __call__(self, arg, after_init=False): 102 return asrootpy(arg, warn=False, after_init=after_init) 103 104 def __getattr__(self, what): 105 try: 106 # check ROOT 107 result = self(getattr(ROOT, what), after_init=True) 108 except AttributeError: 109 # check rootpy 110 result = lookup_rootpy(what) 111 if result is None: 112 raise AttributeError( 113 'ROOT does not have the attribute `{0}` ' 114 'and rootpy does not contain the class `{0}`'.format(what)) 115 return result 116 117 try: 118 # Memoize 119 setattr(self, what, result) 120 except AttributeError: 121 # Oops... Oh well. I tried. 
122 pass 123 124 return result 125 126 @property 127 def R(self): 128 return ROOT 129 130 gPad = proxy_global("gPad") 131 gVirtualX = proxy_global("gVirtualX") 132 133 if ROOT_VERSION < (5, 32, 0): # pragma: no cover 134 # handle versions of ROOT older than 5.32.00 135 gDirectory = proxy_global("gDirectory", no_expand_macro=True) 136 gFile = proxy_global("gFile", no_expand_macro=True) 137 gInterpreter = proxy_global("gInterpreter", no_expand_macro=True) 138 else: 139 gDirectory = proxy_global("gDirectory") 140 gFile = proxy_global("gFile") 141 gInterpreter = proxy_global("gInterpreter") 142 143 # use the smart template STL types from rootpy.stl instead 144 for t in QROOT.std.stlclasses: 145 locals()[t] = getattr(stl, t) 146 del t 147 [end of rootpy/ROOT.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rootpy/ROOT.py b/rootpy/ROOT.py --- a/rootpy/ROOT.py +++ b/rootpy/ROOT.py @@ -130,8 +130,7 @@ gPad = proxy_global("gPad") gVirtualX = proxy_global("gVirtualX") - if ROOT_VERSION < (5, 32, 0): # pragma: no cover - # handle versions of ROOT older than 5.32.00 + if ROOT_VERSION < (5, 32, 0) or ROOT_VERSION >= (6, 9, 2): # pragma: no cover gDirectory = proxy_global("gDirectory", no_expand_macro=True) gFile = proxy_global("gFile", no_expand_macro=True) gInterpreter = proxy_global("gInterpreter", no_expand_macro=True)
{"golden_diff": "diff --git a/rootpy/ROOT.py b/rootpy/ROOT.py\n--- a/rootpy/ROOT.py\n+++ b/rootpy/ROOT.py\n@@ -130,8 +130,7 @@\n gPad = proxy_global(\"gPad\")\n gVirtualX = proxy_global(\"gVirtualX\")\n \n- if ROOT_VERSION < (5, 32, 0): # pragma: no cover\n- # handle versions of ROOT older than 5.32.00\n+ if ROOT_VERSION < (5, 32, 0) or ROOT_VERSION >= (6, 9, 2): # pragma: no cover\n gDirectory = proxy_global(\"gDirectory\", no_expand_macro=True)\n gFile = proxy_global(\"gFile\", no_expand_macro=True)\n gInterpreter = proxy_global(\"gInterpreter\", no_expand_macro=True)\n", "issue": "Error when using root_open: 'TDirectory' object has no attribute 'func'\nAs above:\r\n\r\n`AttributeError: 'TDirectory' object has no attribute 'func'`\n", "before_files": [{"content": "# Copyright 2012 the rootpy developers\n# distributed under the terms of the GNU General Public License\n\"\"\"\n:py:mod:`rootpy.ROOT`\n=====================\n\nThis module is intended to be a drop-in replacement for ordinary\nPyROOT imports by mimicking PyROOT's interface. If you find a case where it is\nnot, please report an issue to the rootpy developers.\n\nBoth ROOT and rootpy classes can be accessed in a harmonized way through this\nmodule. This means you can take advantage of rootpy classes automatically by\nreplacing ``import ROOT`` with ``import rootpy.ROOT as ROOT`` or\n``from rootpy import ROOT`` in your code, while maintaining backward\ncompatibility with existing use of ROOT's classes.\n\nROOT classes are automatically \"asrootpy'd\" *after* the constructor in ROOT has\nbeen called:\n\n.. sourcecode:: python\n\n >>> import rootpy.ROOT as ROOT\n >>> h = ROOT.TH1F('name', 'title', 10, 0, 1)\n >>> h\n Hist('name')\n >>> h.TYPE\n 'F'\n\nAlso access rootpy classes under this same module without needing to remember\nwhere to import them from in rootpy:\n\n.. sourcecode:: python\n\n >>> import rootpy.ROOT as ROOT\n >>> h = ROOT.Hist(10, 0, 1, name='name', type='F')\n >>> h\n Hist('name')\n >>> h.TYPE\n 'F'\n\nPlain old ROOT can still be accessed through the ``R`` property:\n\n.. sourcecode:: python\n\n >>> from rootpy import ROOT\n >>> ROOT.R.TFile\n <class 'ROOT.TFile'>\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom copy import copy\n\nimport ROOT\n\nfrom . import asrootpy, lookup_rootpy, ROOT_VERSION\nfrom . 
import QROOT, stl\nfrom .utils.module_facade import Facade\n\n__all__ = []\n\n\ndef proxy_global(name, no_expand_macro=False):\n \"\"\"\n Used to automatically asrootpy ROOT's thread local variables\n \"\"\"\n if no_expand_macro: # pragma: no cover\n # handle older ROOT versions without _ExpandMacroFunction wrapping\n @property\n def gSomething_no_func(self):\n glob = self(getattr(ROOT, name))\n # create a fake func() that just returns self\n def func():\n return glob\n glob.func = func\n return glob\n return gSomething_no_func\n\n @property\n def gSomething(self):\n glob = getattr(ROOT, name)\n orig_func = glob.func\n\n def asrootpy_izing_func():\n return self(orig_func())\n\n # new_glob = copy(glob)\n new_glob = glob.__class__.__new__(glob.__class__)\n new_glob.func = asrootpy_izing_func\n # Memoize\n setattr(type(self), name, new_glob)\n return new_glob\n return gSomething\n\n\n@Facade(__name__, expose_internal=False)\nclass Module(object):\n\n __version__ = ROOT_VERSION\n\n def __call__(self, arg, after_init=False):\n return asrootpy(arg, warn=False, after_init=after_init)\n\n def __getattr__(self, what):\n try:\n # check ROOT\n result = self(getattr(ROOT, what), after_init=True)\n except AttributeError:\n # check rootpy\n result = lookup_rootpy(what)\n if result is None:\n raise AttributeError(\n 'ROOT does not have the attribute `{0}` '\n 'and rootpy does not contain the class `{0}`'.format(what))\n return result\n\n try:\n # Memoize\n setattr(self, what, result)\n except AttributeError:\n # Oops... Oh well. I tried.\n pass\n\n return result\n\n @property\n def R(self):\n return ROOT\n\n gPad = proxy_global(\"gPad\")\n gVirtualX = proxy_global(\"gVirtualX\")\n\n if ROOT_VERSION < (5, 32, 0): # pragma: no cover\n # handle versions of ROOT older than 5.32.00\n gDirectory = proxy_global(\"gDirectory\", no_expand_macro=True)\n gFile = proxy_global(\"gFile\", no_expand_macro=True)\n gInterpreter = proxy_global(\"gInterpreter\", no_expand_macro=True)\n else:\n gDirectory = proxy_global(\"gDirectory\")\n gFile = proxy_global(\"gFile\")\n gInterpreter = proxy_global(\"gInterpreter\")\n\n # use the smart template STL types from rootpy.stl instead\n for t in QROOT.std.stlclasses:\n locals()[t] = getattr(stl, t)\n del t\n", "path": "rootpy/ROOT.py"}]}
num_tokens_prompt: 1938
num_tokens_diff: 191
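The rootpy fix above simply widens a version guard so that newer ROOT releases fall back to the older proxy path. Stripped of the ROOT specifics, that kind of gate is a plain tuple comparison; the constant names below are invented for illustration:

```python
# Generic version-gate sketch; the names are invented and only the
# tuple-comparison pattern corresponds to the rootpy patch above.
OLD_STYLE_MAX = (5, 32, 0)      # below this, the wrapper API does not exist
NEW_STYLE_BROKEN = (6, 9, 2)    # from this version on, the wrapper changed again

def use_plain_globals(version):
    """Return True when the library should skip the wrapper-based proxies."""
    return version < OLD_STYLE_MAX or version >= NEW_STYLE_BROKEN

assert use_plain_globals((5, 30, 0))
assert not use_plain_globals((6, 4, 0))
assert use_plain_globals((6, 10, 0))
```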
problem_id: gh_patches_debug_22709
source: rasdani/github-patches
task_type: git_diff
in_source_id: sopel-irc__sopel-2494
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Root module description is a mini-rant about LC_ALL rather than a description of the library ### Description Looking at the `sopel` module with `pydoc` in an interactive prompt) exposes the user to [a short rant](https://github.com/sopel-irc/sopel/blob/c26914b68913bc25bdd1f5fed9c5942a87fdfee6/sopel/__init__.py#L1-L4) about the behavior of `LC_ALL` and instructions to use only ASCII in this module. I'm sympathetic to the frustration over #984 that led to this, but it will be an improvement to add a docstring to the module with a short description. ### Reproduction steps Run `python3 -m pydoc sopel` or `import sopel; help(sopel)` in an interactive prompt. ### Expected behavior The user should see a short description of Sopel ### Relevant logs _No response_ ### Notes _No response_ ### Sopel version c26914b ### Installation method `pip install` ### Python version _No response_ ### Operating system _No response_ ### IRCd _No response_ ### Relevant plugins _No response_ </issue> <code> [start of sopel/__init__.py] 1 # ASCII ONLY IN THIS FILE THOUGH!!!!!!! 2 # Python does some stupid bullshit of respecting LC_ALL over the encoding on the 3 # file, so in order to undo Python's ridiculous fucking idiocy, we have to have 4 # our own check. 5 6 # Copyright 2008, Sean B. Palmer, inamidst.com 7 # Copyright 2012, Elsie Powell, http://embolalia.com 8 # Copyright 2012, Elad Alfassa <elad@fedoraproject.org> 9 # 10 # Licensed under the Eiffel Forum License 2. 11 12 from __future__ import annotations 13 14 from collections import namedtuple 15 import locale 16 import re 17 import sys 18 19 # TODO: replace with stdlib importlib.metadata when dropping py3.7 20 # version info used in this module works from py3.8+ 21 import importlib_metadata 22 23 __all__ = [ 24 'bot', 25 'config', 26 'db', 27 'formatting', 28 'irc', 29 'loader', 30 'logger', 31 'module', # deprecated in 7.1, removed in 9.0 32 'plugin', 33 'tools', 34 'trigger', 35 'version_info', 36 ] 37 38 loc = locale.getlocale() 39 if not loc[1] or ('UTF-8' not in loc[1] and 'utf8' not in loc[1]): 40 print('WARNING!!! You are running with a non-UTF8 locale environment ' 41 'variable (e.g. LC_ALL is set to "C"), which makes Python 3 do ' 42 'stupid things. If you get strange errors, please set it to ' 43 'something like "en_US.UTF-8".', file=sys.stderr) 44 45 46 __version__ = importlib_metadata.version('sopel') 47 48 49 def _version_info(version=__version__): 50 regex = re.compile(r'(\d+)\.(\d+)\.(\d+)(?:[\-\.]?(a|b|rc)(\d+))?.*') 51 version_match = regex.match(version) 52 53 if version_match is None: 54 raise RuntimeError("Can't parse version number!") 55 56 version_groups = version_match.groups() 57 major, minor, micro = (int(piece) for piece in version_groups[0:3]) 58 level = version_groups[3] 59 serial = int(version_groups[4] or 0) 60 if level == 'a': 61 level = 'alpha' 62 elif level == 'b': 63 level = 'beta' 64 elif level == 'rc': 65 level = 'candidate' 66 elif not level and version_groups[4] is None: 67 level = 'final' 68 else: 69 level = 'alpha' 70 71 VersionInfo = namedtuple('VersionInfo', 72 'major, minor, micro, releaselevel, serial') 73 return VersionInfo(major, minor, micro, level, serial) 74 75 76 version_info = _version_info() 77 [end of sopel/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sopel/__init__.py b/sopel/__init__.py --- a/sopel/__init__.py +++ b/sopel/__init__.py @@ -1,8 +1,9 @@ -# ASCII ONLY IN THIS FILE THOUGH!!!!!!! -# Python does some stupid bullshit of respecting LC_ALL over the encoding on the -# file, so in order to undo Python's ridiculous fucking idiocy, we have to have -# our own check. +""" +Sopel is a simple, easy-to-use, open-source IRC utility bot, written in Python. +It’s designed to be easy to use, easy to run, and easy to extend. +""" +# # Copyright 2008, Sean B. Palmer, inamidst.com # Copyright 2012, Elsie Powell, http://embolalia.com # Copyright 2012, Elad Alfassa <elad@fedoraproject.org> @@ -37,9 +38,8 @@ loc = locale.getlocale() if not loc[1] or ('UTF-8' not in loc[1] and 'utf8' not in loc[1]): - print('WARNING!!! You are running with a non-UTF8 locale environment ' - 'variable (e.g. LC_ALL is set to "C"), which makes Python 3 do ' - 'stupid things. If you get strange errors, please set it to ' + print('Warning: Running with a non-UTF8 locale. If you see strange ' + 'encoding errors, try setting the LC_ALL environment variable to ' 'something like "en_US.UTF-8".', file=sys.stderr)
{"golden_diff": "diff --git a/sopel/__init__.py b/sopel/__init__.py\n--- a/sopel/__init__.py\n+++ b/sopel/__init__.py\n@@ -1,8 +1,9 @@\n-# ASCII ONLY IN THIS FILE THOUGH!!!!!!!\n-# Python does some stupid bullshit of respecting LC_ALL over the encoding on the\n-# file, so in order to undo Python's ridiculous fucking idiocy, we have to have\n-# our own check.\n+\"\"\"\n+Sopel is a simple, easy-to-use, open-source IRC utility bot, written in Python.\n \n+It\u2019s designed to be easy to use, easy to run, and easy to extend.\n+\"\"\"\n+#\n # Copyright 2008, Sean B. Palmer, inamidst.com\n # Copyright 2012, Elsie Powell, http://embolalia.com\n # Copyright 2012, Elad Alfassa <elad@fedoraproject.org>\n@@ -37,9 +38,8 @@\n \n loc = locale.getlocale()\n if not loc[1] or ('UTF-8' not in loc[1] and 'utf8' not in loc[1]):\n- print('WARNING!!! You are running with a non-UTF8 locale environment '\n- 'variable (e.g. LC_ALL is set to \"C\"), which makes Python 3 do '\n- 'stupid things. If you get strange errors, please set it to '\n+ print('Warning: Running with a non-UTF8 locale. If you see strange '\n+ 'encoding errors, try setting the LC_ALL environment variable to '\n 'something like \"en_US.UTF-8\".', file=sys.stderr)\n", "issue": "Root module description is a mini-rant about LC_ALL rather than a description of the library\n### Description\n\nLooking at the `sopel` module with `pydoc` in an interactive prompt) exposes the user to [a short rant](https://github.com/sopel-irc/sopel/blob/c26914b68913bc25bdd1f5fed9c5942a87fdfee6/sopel/__init__.py#L1-L4) about the behavior of `LC_ALL` and instructions to use only ASCII in this module.\r\n\r\nI'm sympathetic to the frustration over #984 that led to this, but it will be an improvement to add a docstring to the module with a short description.\n\n### Reproduction steps\n\nRun `python3 -m pydoc sopel` or `import sopel; help(sopel)` in an interactive prompt.\n\n### Expected behavior\n\nThe user should see a short description of Sopel\n\n### Relevant logs\n\n_No response_\n\n### Notes\n\n_No response_\n\n### Sopel version\n\nc26914b\n\n### Installation method\n\n`pip install`\n\n### Python version\n\n_No response_\n\n### Operating system\n\n_No response_\n\n### IRCd\n\n_No response_\n\n### Relevant plugins\n\n_No response_\n", "before_files": [{"content": "# ASCII ONLY IN THIS FILE THOUGH!!!!!!!\n# Python does some stupid bullshit of respecting LC_ALL over the encoding on the\n# file, so in order to undo Python's ridiculous fucking idiocy, we have to have\n# our own check.\n\n# Copyright 2008, Sean B. Palmer, inamidst.com\n# Copyright 2012, Elsie Powell, http://embolalia.com\n# Copyright 2012, Elad Alfassa <elad@fedoraproject.org>\n#\n# Licensed under the Eiffel Forum License 2.\n\nfrom __future__ import annotations\n\nfrom collections import namedtuple\nimport locale\nimport re\nimport sys\n\n# TODO: replace with stdlib importlib.metadata when dropping py3.7\n# version info used in this module works from py3.8+\nimport importlib_metadata\n\n__all__ = [\n 'bot',\n 'config',\n 'db',\n 'formatting',\n 'irc',\n 'loader',\n 'logger',\n 'module', # deprecated in 7.1, removed in 9.0\n 'plugin',\n 'tools',\n 'trigger',\n 'version_info',\n]\n\nloc = locale.getlocale()\nif not loc[1] or ('UTF-8' not in loc[1] and 'utf8' not in loc[1]):\n print('WARNING!!! You are running with a non-UTF8 locale environment '\n 'variable (e.g. LC_ALL is set to \"C\"), which makes Python 3 do '\n 'stupid things. 
If you get strange errors, please set it to '\n 'something like \"en_US.UTF-8\".', file=sys.stderr)\n\n\n__version__ = importlib_metadata.version('sopel')\n\n\ndef _version_info(version=__version__):\n regex = re.compile(r'(\\d+)\\.(\\d+)\\.(\\d+)(?:[\\-\\.]?(a|b|rc)(\\d+))?.*')\n version_match = regex.match(version)\n\n if version_match is None:\n raise RuntimeError(\"Can't parse version number!\")\n\n version_groups = version_match.groups()\n major, minor, micro = (int(piece) for piece in version_groups[0:3])\n level = version_groups[3]\n serial = int(version_groups[4] or 0)\n if level == 'a':\n level = 'alpha'\n elif level == 'b':\n level = 'beta'\n elif level == 'rc':\n level = 'candidate'\n elif not level and version_groups[4] is None:\n level = 'final'\n else:\n level = 'alpha'\n\n VersionInfo = namedtuple('VersionInfo',\n 'major, minor, micro, releaselevel, serial')\n return VersionInfo(major, minor, micro, level, serial)\n\n\nversion_info = _version_info()\n", "path": "sopel/__init__.py"}]}
num_tokens_prompt: 1575
num_tokens_diff: 370
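The sopel patch above replaces an in-source rant with a proper module docstring and a calmer warning about non-UTF-8 locales. A self-contained sketch of such a locale check, independent of sopel's actual code:

```python
"""Example module docstring: a short description beats a comment rant."""
import locale
import sys

def warn_if_not_utf8():
    # getlocale() returns (language_code, encoding); the encoding may be None
    # when LC_ALL/LANG are unset or set to something like "C".
    _, encoding = locale.getlocale()
    if not encoding or "utf" not in encoding.lower().replace("-", ""):
        print(
            "Warning: non-UTF-8 locale detected. If you see strange encoding "
            'errors, try setting LC_ALL to something like "en_US.UTF-8".',
            file=sys.stderr,
        )

if __name__ == "__main__":
    warn_if_not_utf8()
```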
problem_id: gh_patches_debug_30963
source: rasdani/github-patches
task_type: git_diff
in_source_id: bridgecrewio__checkov-5638
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CKV_AZURE_226: error in check and testcase **Describe the issue** CKV_AZURE_226 checks for ephemeral disks within the "main resource" azurerm_kubernetes_cluster but the cluster itself doesn't have any argument called os_disk_type. The argument os_disk_type is part of the node pool. The testcase [here](https://github.com/bridgecrewio/checkov/pull/5584/files#diff-c0b8f08537766f6eff2a5d10b9439d227fdaaebe6ff7903008825c5f9d51c22dR1) is misleading and the check itself [here](https://github.com/bridgecrewio/checkov/pull/5584/files#diff-c9248390aa120f7af4643f1908d3d824fb903fd3c6cd63e9e77fe8e9ecd59289R28) too. In my opinion this must be something like ``` def get_inspected_key(self) -> str: return "default_node_pool/[0]/os_disk_type" ``` otherwise it won't work? Same for CKV_AZURE_227. **Examples** ``` [root] # head -30 aks.tf resource "azurerm_kubernetes_cluster" "this" { name = local.name_prefix location = var.resource_group.location resource_group_name = var.resource_group.name node_resource_group = "${local.name_prefix}-node-pool" dns_prefix = local.name_prefix kubernetes_version = local.kubernetes_version sku_tier = var.sku_tier api_server_access_profile { authorized_ip_ranges = var.api_server_authorized_ip_ranges } default_node_pool { name = "default" enable_host_encryption = true vm_size = "Standard_E4ads_v5" os_disk_type = "Ephemeral" zones = [1, 2, 3] only_critical_addons_enabled = true type = "VirtualMachineScaleSets" vnet_subnet_id = var.subnet_id enable_auto_scaling = true max_count = 6 min_count = 2 orchestrator_version = local.kubernetes_version upgrade_settings { ``` results in ``` [root] # checkov --skip-framework kubernetes --skip-framework helm --quiet --compact -o junitxml -o cli --directory . 2023-10-02 11:58:47,399 [MainThread ] [WARNI] The framework "sca_image" is part of the "SCA" module, which is not enabled in the platform 2023-10-02 11:58:47,399 [MainThread ] [WARNI] The framework "sca_package" is part of the "SCA" module, which is not enabled in the platform terraform scan results: Passed checks: 6, Failed checks: 11, Skipped checks: 0 [...] Check: CKV_AZURE_226: "Ensure ephemeral disks are used for OS disks" FAILED for resource: azurerm_kubernetes_cluster.this File: /aks.tf:1-64 Check: CKV_AZURE_227: "Ensure that the AKS cluster encrypt temp disks, caches, and data flows between Compute and Storage resources" FAILED for resource: azurerm_kubernetes_cluster.this File: /aks.tf:1-64 [...] ``` Please also see https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster for code example. **Version (please complete the following information):** - Checkov Version 2.4.58 **Additional context** This is related to https://github.com/bridgecrewio/checkov/pull/5584 and https://github.com/bridgecrewio/checkov/pull/5588. 
</issue> <code> [start of checkov/terraform/checks/resource/azure/AKSEphemeralOSDisks.py] 1 from checkov.common.models.enums import CheckCategories 2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck 3 from typing import Any 4 5 6 class AKSEphemeralOSDisks(BaseResourceValueCheck): 7 def __init__(self) -> None: 8 """ 9 Temporary data can contain sensitive data at some points, by using ephemeral disks, 10 we ensure that data written to OS disk is stored on local VM storage and isn't persisted to Azure Storage 11 12 Azure automatically replicates data stored in the managed OS disk of a virtual machine to Azure storage 13 to avoid data loss in case the virtual machine needs to be relocated to another host. 14 Generally speaking, containers are not designed to have local state persisted to the managed OS disk, 15 hence this behavior offers limited value to AKS hosted while providing some drawbacks, 16 including slower node provisioning and higher read/write latency. 17 18 Ephemeral disks allow us also to have faster cluster operations like scale or upgrade 19 due to faster re-imaging and boot times. 20 """ 21 name = "Ensure ephemeral disks are used for OS disks" 22 id = "CKV_AZURE_226" 23 supported_resources = ("azurerm_kubernetes_cluster",) 24 categories = (CheckCategories.KUBERNETES,) 25 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 26 27 def get_inspected_key(self) -> str: 28 return "os_disk_type" 29 30 def get_expected_value(self) -> Any: 31 return "Ephemeral" 32 33 34 check = AKSEphemeralOSDisks() 35 [end of checkov/terraform/checks/resource/azure/AKSEphemeralOSDisks.py] [start of checkov/terraform/checks/resource/azure/AKSEncryptionAtHostEnabled.py] 1 2 from checkov.common.models.enums import CheckCategories, CheckResult 3 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck 4 5 6 class AKSEncryptionAtHostEnabled(BaseResourceValueCheck): 7 def __init__(self) -> None: 8 """ 9 With host-based encryption, the data stored on the VM host of 10 your AKS agent nodes' VMs is encrypted at rest and flows encrypted to the Storage service. 11 12 This means the temp disks are encrypted at rest with platform-managed keys. 13 The cache of OS and data disks is encrypted at rest with either platform-managed keys 14 or customer-managed keys depending on the encryption type set on those disks. 15 """ 16 name = "Ensure that the AKS cluster encrypt temp disks, caches, and data flows " 17 name += "between Compute and Storage resources" 18 id = "CKV_AZURE_227" 19 supported_resources = ("azurerm_kubernetes_cluster", "azurerm_kubernetes_cluster_node_pool") 20 categories = (CheckCategories.KUBERNETES,) 21 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources, 22 missing_block_result=CheckResult.FAILED) 23 24 def get_inspected_key(self) -> str: 25 return "enable_host_encryption" 26 27 28 check = AKSEncryptionAtHostEnabled() 29 [end of checkov/terraform/checks/resource/azure/AKSEncryptionAtHostEnabled.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/terraform/checks/resource/azure/AKSEncryptionAtHostEnabled.py b/checkov/terraform/checks/resource/azure/AKSEncryptionAtHostEnabled.py --- a/checkov/terraform/checks/resource/azure/AKSEncryptionAtHostEnabled.py +++ b/checkov/terraform/checks/resource/azure/AKSEncryptionAtHostEnabled.py @@ -1,4 +1,3 @@ - from checkov.common.models.enums import CheckCategories, CheckResult from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck @@ -18,11 +17,19 @@ id = "CKV_AZURE_227" supported_resources = ("azurerm_kubernetes_cluster", "azurerm_kubernetes_cluster_node_pool") categories = (CheckCategories.KUBERNETES,) - super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources, - missing_block_result=CheckResult.FAILED) + super().__init__( + name=name, + id=id, + categories=categories, + supported_resources=supported_resources, + missing_block_result=CheckResult.FAILED, + ) def get_inspected_key(self) -> str: - return "enable_host_encryption" + if self.entity_type == "azurerm_kubernetes_cluster": + return "default_node_pool/[0]/enable_host_encryption" + else: + return "enable_host_encryption" check = AKSEncryptionAtHostEnabled() diff --git a/checkov/terraform/checks/resource/azure/AKSEphemeralOSDisks.py b/checkov/terraform/checks/resource/azure/AKSEphemeralOSDisks.py --- a/checkov/terraform/checks/resource/azure/AKSEphemeralOSDisks.py +++ b/checkov/terraform/checks/resource/azure/AKSEphemeralOSDisks.py @@ -25,7 +25,7 @@ super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) def get_inspected_key(self) -> str: - return "os_disk_type" + return "default_node_pool/[0]/os_disk_type" def get_expected_value(self) -> Any: return "Ephemeral"
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/AKSEncryptionAtHostEnabled.py b/checkov/terraform/checks/resource/azure/AKSEncryptionAtHostEnabled.py\n--- a/checkov/terraform/checks/resource/azure/AKSEncryptionAtHostEnabled.py\n+++ b/checkov/terraform/checks/resource/azure/AKSEncryptionAtHostEnabled.py\n@@ -1,4 +1,3 @@\n-\n from checkov.common.models.enums import CheckCategories, CheckResult\n from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n \n@@ -18,11 +17,19 @@\n id = \"CKV_AZURE_227\"\n supported_resources = (\"azurerm_kubernetes_cluster\", \"azurerm_kubernetes_cluster_node_pool\")\n categories = (CheckCategories.KUBERNETES,)\n- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources,\n- missing_block_result=CheckResult.FAILED)\n+ super().__init__(\n+ name=name,\n+ id=id,\n+ categories=categories,\n+ supported_resources=supported_resources,\n+ missing_block_result=CheckResult.FAILED,\n+ )\n \n def get_inspected_key(self) -> str:\n- return \"enable_host_encryption\"\n+ if self.entity_type == \"azurerm_kubernetes_cluster\":\n+ return \"default_node_pool/[0]/enable_host_encryption\"\n+ else:\n+ return \"enable_host_encryption\"\n \n \n check = AKSEncryptionAtHostEnabled()\ndiff --git a/checkov/terraform/checks/resource/azure/AKSEphemeralOSDisks.py b/checkov/terraform/checks/resource/azure/AKSEphemeralOSDisks.py\n--- a/checkov/terraform/checks/resource/azure/AKSEphemeralOSDisks.py\n+++ b/checkov/terraform/checks/resource/azure/AKSEphemeralOSDisks.py\n@@ -25,7 +25,7 @@\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def get_inspected_key(self) -> str:\n- return \"os_disk_type\"\n+ return \"default_node_pool/[0]/os_disk_type\"\n \n def get_expected_value(self) -> Any:\n return \"Ephemeral\"\n", "issue": "CKV_AZURE_226: error in check and testcase\n**Describe the issue**\r\nCKV_AZURE_226 checks for ephemeral disks within the \"main resource\" azurerm_kubernetes_cluster but the cluster itself doesn't have any argument called os_disk_type. The argument os_disk_type is part of the node pool. \r\nThe testcase [here](https://github.com/bridgecrewio/checkov/pull/5584/files#diff-c0b8f08537766f6eff2a5d10b9439d227fdaaebe6ff7903008825c5f9d51c22dR1) is misleading and the check itself [here](https://github.com/bridgecrewio/checkov/pull/5584/files#diff-c9248390aa120f7af4643f1908d3d824fb903fd3c6cd63e9e77fe8e9ecd59289R28) too. 
\r\n\r\nIn my opinion this must be something like \r\n```\r\n def get_inspected_key(self) -> str:\r\n return \"default_node_pool/[0]/os_disk_type\"\r\n```\r\notherwise it won't work?\r\n\r\nSame for CKV_AZURE_227.\r\n\r\n**Examples**\r\n```\r\n[root] # head -30 aks.tf\r\nresource \"azurerm_kubernetes_cluster\" \"this\" {\r\n name = local.name_prefix\r\n location = var.resource_group.location\r\n resource_group_name = var.resource_group.name\r\n node_resource_group = \"${local.name_prefix}-node-pool\"\r\n dns_prefix = local.name_prefix\r\n kubernetes_version = local.kubernetes_version\r\n sku_tier = var.sku_tier\r\n\r\n api_server_access_profile {\r\n authorized_ip_ranges = var.api_server_authorized_ip_ranges\r\n }\r\n\r\n default_node_pool {\r\n name = \"default\"\r\n\r\n enable_host_encryption = true\r\n vm_size = \"Standard_E4ads_v5\"\r\n os_disk_type = \"Ephemeral\"\r\n zones = [1, 2, 3]\r\n only_critical_addons_enabled = true\r\n\r\n type = \"VirtualMachineScaleSets\"\r\n vnet_subnet_id = var.subnet_id\r\n enable_auto_scaling = true\r\n max_count = 6\r\n min_count = 2\r\n orchestrator_version = local.kubernetes_version\r\n\r\n upgrade_settings {\r\n```\r\n\r\nresults in\r\n```\r\n[root] # checkov --skip-framework kubernetes --skip-framework helm --quiet --compact -o junitxml -o cli --directory .\r\n2023-10-02 11:58:47,399 [MainThread ] [WARNI] The framework \"sca_image\" is part of the \"SCA\" module, which is not enabled in the platform\r\n2023-10-02 11:58:47,399 [MainThread ] [WARNI] The framework \"sca_package\" is part of the \"SCA\" module, which is not enabled in the platform\r\nterraform scan results:\r\n\r\nPassed checks: 6, Failed checks: 11, Skipped checks: 0\r\n\r\n[...]\r\nCheck: CKV_AZURE_226: \"Ensure ephemeral disks are used for OS disks\"\r\n FAILED for resource: azurerm_kubernetes_cluster.this\r\n File: /aks.tf:1-64\r\nCheck: CKV_AZURE_227: \"Ensure that the AKS cluster encrypt temp disks, caches, and data flows between Compute and Storage resources\"\r\n FAILED for resource: azurerm_kubernetes_cluster.this\r\n File: /aks.tf:1-64\r\n[...]\r\n```\r\n\r\nPlease also see https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster for code example.\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.4.58\r\n\r\n**Additional context**\r\nThis is related to https://github.com/bridgecrewio/checkov/pull/5584 and https://github.com/bridgecrewio/checkov/pull/5588.\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom typing import Any\n\n\nclass AKSEphemeralOSDisks(BaseResourceValueCheck):\n def __init__(self) -> None:\n \"\"\"\n Temporary data can contain sensitive data at some points, by using ephemeral disks,\n we ensure that data written to OS disk is stored on local VM storage and isn't persisted to Azure Storage\n\n Azure automatically replicates data stored in the managed OS disk of a virtual machine to Azure storage\n to avoid data loss in case the virtual machine needs to be relocated to another host.\n Generally speaking, containers are not designed to have local state persisted to the managed OS disk,\n hence this behavior offers limited value to AKS hosted while providing some drawbacks,\n including slower node provisioning and higher read/write latency.\n\n Ephemeral disks allow us also to have faster cluster operations like scale or upgrade\n due to 
faster re-imaging and boot times.\n \"\"\"\n name = \"Ensure ephemeral disks are used for OS disks\"\n id = \"CKV_AZURE_226\"\n supported_resources = (\"azurerm_kubernetes_cluster\",)\n categories = (CheckCategories.KUBERNETES,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self) -> str:\n return \"os_disk_type\"\n\n def get_expected_value(self) -> Any:\n return \"Ephemeral\"\n\n\ncheck = AKSEphemeralOSDisks()\n", "path": "checkov/terraform/checks/resource/azure/AKSEphemeralOSDisks.py"}, {"content": "\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass AKSEncryptionAtHostEnabled(BaseResourceValueCheck):\n def __init__(self) -> None:\n \"\"\"\n With host-based encryption, the data stored on the VM host of\n your AKS agent nodes' VMs is encrypted at rest and flows encrypted to the Storage service.\n\n This means the temp disks are encrypted at rest with platform-managed keys.\n The cache of OS and data disks is encrypted at rest with either platform-managed keys\n or customer-managed keys depending on the encryption type set on those disks.\n \"\"\"\n name = \"Ensure that the AKS cluster encrypt temp disks, caches, and data flows \"\n name += \"between Compute and Storage resources\"\n id = \"CKV_AZURE_227\"\n supported_resources = (\"azurerm_kubernetes_cluster\", \"azurerm_kubernetes_cluster_node_pool\")\n categories = (CheckCategories.KUBERNETES,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources,\n missing_block_result=CheckResult.FAILED)\n\n def get_inspected_key(self) -> str:\n return \"enable_host_encryption\"\n\n\ncheck = AKSEncryptionAtHostEnabled()\n", "path": "checkov/terraform/checks/resource/azure/AKSEncryptionAtHostEnabled.py"}]}
2,284
511
gh_patches_debug_22890
rasdani/github-patches
git_diff
bokeh__bokeh-4524
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Undeprecate VBox and HBox I would like names `VBox` and `HBox` to stay, because they are more meaningful to from UI point of view than the new counterparts. </issue> <code> [start of bokeh/models/layouts.py] 1 """ Various kinds of layout components. 2 3 """ 4 from __future__ import absolute_import 5 6 import warnings 7 import logging 8 logger = logging.getLogger(__name__) 9 10 from ..core import validation 11 12 from ..core.validation.warnings import ( 13 EMPTY_LAYOUT, 14 BOTH_CHILD_AND_ROOT, 15 ) 16 from ..core.enums import Location, Responsive as ResponsiveEnum 17 from ..core.properties import abstract, Bool, Int, Instance, List, Responsive, Override 18 from ..embed import notebook_div 19 from ..model import Model 20 from ..util.deprecate import deprecated 21 22 23 @abstract 24 class LayoutDOM(Model): 25 """ An abstract base class for layout components. ``LayoutDOM`` is not 26 generally useful to instantiate on its own. 27 28 """ 29 30 width = Int(help=""" 31 An optional width for the component (in pixels). 32 """) 33 34 height = Int(help=""" 35 An optional height for the component (in pixels). 36 """) 37 38 disabled = Bool(False, help=""" 39 Whether the widget will be disabled when rendered. If ``True``, 40 the widget will be greyed-out, and not respond to UI events. 41 """) 42 43 responsive = Responsive(help=""" 44 The type of responsiveness for the item being displayed. Possible values are 45 ``"fixed"`` (or ``False``), ``"scale_width"`` (or ``True``), 46 ``"scale_height"``, ``"scale_both"``, ``"stretch_both"``. 47 48 ``"stretch_both"`` elements are completely responsive (independently in width and height) and 49 will resize to occupy all available space, even if this changes the aspect ratio of the element. 50 This is sometimes called outside-in, and is a typical behavior for desktop applications. 51 52 ``"fixed"`` elements are not responsive. They will retain their original width and height 53 regardless of any subsequent browser window resize events. 54 55 ``"scale_width"`` elements will responsively resize to fit to the width available, *while 56 maintaining the original aspect ratio*. This is a typical behavior for modern websites. For a 57 ``Plot``, the aspect ratio ``plot_width/plot_height`` is maintained. 58 59 ``"scale_height"`` elements will responsively resize to fit to the height available, *while 60 maintaining the original aspect ratio*. For a ``Plot``, the aspect ratio 61 ``plot_width/plot_height`` is maintained. A plot with ``"scale_height"`` mode needs 62 to be wrapped in a ``Row`` or ``Column`` to be responsive. 63 64 65 ``"scale_both"`` elements will responsively resize to fir both the width and height available, 66 *while maintaining the original aspect ratio*. 67 68 """) 69 70 # TODO: (mp) Not yet, because it breaks plotting/notebook examples. 71 # Rename to _repr_html_ if we decide to enable this by default. 72 def __repr_html__(self): 73 return notebook_div(self) 74 75 @property 76 def html(self): 77 from IPython.core.display import HTML 78 return HTML(self.__repr_html__()) 79 80 81 class Spacer(LayoutDOM): 82 """ A container for space used to fill an empty spot in a row or column. 
83 84 """ 85 86 87 class WidgetBox(LayoutDOM): 88 """ A container for widgets that are part of a layout.""" 89 def __init__(self, *args, **kwargs): 90 if len(args) > 0 and "children" in kwargs: 91 raise ValueError("'children' keyword cannot be used with positional arguments") 92 elif len(args) > 0: 93 kwargs["children"] = list(args) 94 super(WidgetBox, self).__init__(**kwargs) 95 96 @validation.warning(EMPTY_LAYOUT) 97 def _check_empty_layout(self): 98 from itertools import chain 99 if not list(chain(self.children)): 100 return str(self) 101 102 @validation.warning(BOTH_CHILD_AND_ROOT) 103 def _check_child_is_also_root(self): 104 problems = [] 105 for c in self.children: 106 if c.document is not None and c in c.document.roots: 107 problems.append(str(c)) 108 if problems: 109 return ", ".join(problems) 110 else: 111 return None 112 113 children = List(Instance('bokeh.models.widgets.Widget'), help=""" 114 The list of widgets to put in the layout box. 115 """) 116 117 responsive = Override(default='fixed') 118 119 120 @abstract 121 class Box(LayoutDOM): 122 """ Abstract base class for Row and Column. Do not use directly. 123 """ 124 125 def __init__(self, *args, **kwargs): 126 127 if len(args) > 0 and "children" in kwargs: 128 raise ValueError("'children' keyword cannot be used with positional arguments") 129 elif len(args) > 0: 130 kwargs["children"] = list(args) 131 132 unwrapped_children = kwargs.get("children", []) 133 kwargs["children"] = self._wrap_children(unwrapped_children) 134 super(Box, self).__init__(**kwargs) 135 136 def _wrap_children(self, children): 137 """ Wrap any Widgets of a list of child layouts in a WidgetBox. 138 This allows for the convenience of just spelling Row(button1, button2). 139 """ 140 from .widgets.widget import Widget 141 wrapped_children = [] 142 for child in children: 143 if isinstance(child, Widget): 144 child = WidgetBox( 145 children=[child], 146 responsive=child.responsive, 147 width=child.width, 148 height=child.height, 149 disabled=child.disabled 150 ) 151 wrapped_children.append(child) 152 return wrapped_children 153 154 @validation.warning(EMPTY_LAYOUT) 155 def _check_empty_layout(self): 156 from itertools import chain 157 if not list(chain(self.children)): 158 return str(self) 159 160 @validation.warning(BOTH_CHILD_AND_ROOT) 161 def _check_child_is_also_root(self): 162 problems = [] 163 for c in self.children: 164 if c.document is not None and c in c.document.roots: 165 problems.append(str(c)) 166 if problems: 167 return ", ".join(problems) 168 else: 169 return None 170 171 #TODO Debating the following instead to prevent people adding just a plain 172 # widget into a box, which sometimes works and sometimes looks disastrous 173 #children = List( 174 # Either( 175 # Instance('bokeh.models.layouts.Row'), 176 # Instance('bokeh.models.layouts.Column'), 177 # Instance('bokeh.models.plots.Plot'), 178 # Instance('bokeh.models.layouts.WidgetBox') 179 # ), help=""" 180 # The list of children, which can be other components including plots, rows, columns, and widgets. 181 #""") 182 children = List(Instance(LayoutDOM), help=""" 183 The list of children, which can be other components including plots, rows, columns, and widgets. 184 """) 185 186 responsive = Override(default='fixed') 187 188 189 class Row(Box): 190 """ Lay out child components in a single horizontal row. 191 192 Children can be specified as positional arguments, as a single argument 193 that is a sequence, or using the ``children`` keyword argument. 
194 """ 195 196 197 class Column(Box): 198 """ Lay out child components in a single vertical row. 199 200 Children can be specified as positional arguments, as a single argument 201 that is a sequence, or using the ``children`` keyword argument. 202 """ 203 204 205 # ---- DEPRECATIONS 206 207 @deprecated("Bokeh 0.12.0", "bokeh.layouts.gridplot") 208 def GridPlot(*args, **kwargs): 209 from bokeh.layouts import gridplot 210 return gridplot(*args, **kwargs) 211 212 213 @deprecated("Bokeh 0.12.0", "bokeh.models.layouts.Row") 214 def HBox(*args, **kwargs): 215 return Row(*args, **kwargs) 216 217 218 @deprecated("Bokeh 0.12.0", "bokeh.models.layouts.Column") 219 def VBox(*args, **kwargs): 220 return Column(*args, **kwargs) 221 222 223 @deprecated("Bokeh 0.12.0", "bokeh.models.layouts.WidgetBox") 224 def VBoxForm(*args, **kwargs): 225 from bokeh.models.widgets.widget import Widget 226 227 if len(args) > 0 and "children" in kwargs: 228 raise ValueError("'children' keyword cannot be used with positional arguments") 229 elif len(args) > 0: 230 children = list(args) 231 else: 232 children = kwargs.get("children", []) 233 is_widget = [isinstance(item, Widget) for item in children] 234 if all(is_widget): 235 return WidgetBox(*args, **kwargs) 236 else: 237 warnings.warn( 238 """WARNING: Non-widgets added to VBoxForm! VBoxForm is deprecated and is 239 being replaced with WidgetBox. WidgetBox does not allow you to add non-widgets to it. 240 We have transformed your request into a Column, with your Plots and WidgetBox(es) inside 241 it. In the future, you will need to update your code to use Row and Column. You may 242 find the new bokeh.layouts functions helpful. 243 """) 244 return Column(*args, **kwargs) 245 [end of bokeh/models/layouts.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bokeh/models/layouts.py b/bokeh/models/layouts.py --- a/bokeh/models/layouts.py +++ b/bokeh/models/layouts.py @@ -202,23 +202,33 @@ """ -# ---- DEPRECATIONS - -@deprecated("Bokeh 0.12.0", "bokeh.layouts.gridplot") -def GridPlot(*args, **kwargs): - from bokeh.layouts import gridplot - return gridplot(*args, **kwargs) +def HBox(*args, **kwargs): + """ Lay out child components in a single horizontal row. + Children can be specified as positional arguments, as a single argument + that is a sequence, or using the ``children`` keyword argument. -@deprecated("Bokeh 0.12.0", "bokeh.models.layouts.Row") -def HBox(*args, **kwargs): + Returns a Row instance. + """ return Row(*args, **kwargs) -@deprecated("Bokeh 0.12.0", "bokeh.models.layouts.Column") def VBox(*args, **kwargs): + """ Lay out child components in a single vertical row. + + Children can be specified as positional arguments, as a single argument + that is a sequence, or using the ``children`` keyword argument. + + Returns a Column instance. + """ return Column(*args, **kwargs) +# ---- DEPRECATIONS + +@deprecated("Bokeh 0.12.0", "bokeh.layouts.gridplot") +def GridPlot(*args, **kwargs): + from bokeh.layouts import gridplot + return gridplot(*args, **kwargs) @deprecated("Bokeh 0.12.0", "bokeh.models.layouts.WidgetBox") def VBoxForm(*args, **kwargs):
{"golden_diff": "diff --git a/bokeh/models/layouts.py b/bokeh/models/layouts.py\n--- a/bokeh/models/layouts.py\n+++ b/bokeh/models/layouts.py\n@@ -202,23 +202,33 @@\n \"\"\"\n \n \n-# ---- DEPRECATIONS\n-\n-@deprecated(\"Bokeh 0.12.0\", \"bokeh.layouts.gridplot\")\n-def GridPlot(*args, **kwargs):\n- from bokeh.layouts import gridplot\n- return gridplot(*args, **kwargs)\n+def HBox(*args, **kwargs):\n+ \"\"\" Lay out child components in a single horizontal row.\n \n+ Children can be specified as positional arguments, as a single argument\n+ that is a sequence, or using the ``children`` keyword argument.\n \n-@deprecated(\"Bokeh 0.12.0\", \"bokeh.models.layouts.Row\")\n-def HBox(*args, **kwargs):\n+ Returns a Row instance.\n+ \"\"\"\n return Row(*args, **kwargs)\n \n \n-@deprecated(\"Bokeh 0.12.0\", \"bokeh.models.layouts.Column\")\n def VBox(*args, **kwargs):\n+ \"\"\" Lay out child components in a single vertical row.\n+\n+ Children can be specified as positional arguments, as a single argument\n+ that is a sequence, or using the ``children`` keyword argument.\n+\n+ Returns a Column instance.\n+ \"\"\"\n return Column(*args, **kwargs)\n \n+# ---- DEPRECATIONS\n+\n+@deprecated(\"Bokeh 0.12.0\", \"bokeh.layouts.gridplot\")\n+def GridPlot(*args, **kwargs):\n+ from bokeh.layouts import gridplot\n+ return gridplot(*args, **kwargs)\n \n @deprecated(\"Bokeh 0.12.0\", \"bokeh.models.layouts.WidgetBox\")\n def VBoxForm(*args, **kwargs):\n", "issue": "Undeprecate VBox and HBox\nI would like names `VBox` and `HBox` to stay, because they are more meaningful to from UI point of view than the new counterparts.\n\n", "before_files": [{"content": "\"\"\" Various kinds of layout components.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nimport warnings\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom ..core import validation\n\nfrom ..core.validation.warnings import (\n EMPTY_LAYOUT,\n BOTH_CHILD_AND_ROOT,\n)\nfrom ..core.enums import Location, Responsive as ResponsiveEnum\nfrom ..core.properties import abstract, Bool, Int, Instance, List, Responsive, Override\nfrom ..embed import notebook_div\nfrom ..model import Model\nfrom ..util.deprecate import deprecated\n\n\n@abstract\nclass LayoutDOM(Model):\n \"\"\" An abstract base class for layout components. ``LayoutDOM`` is not\n generally useful to instantiate on its own.\n\n \"\"\"\n\n width = Int(help=\"\"\"\n An optional width for the component (in pixels).\n \"\"\")\n\n height = Int(help=\"\"\"\n An optional height for the component (in pixels).\n \"\"\")\n\n disabled = Bool(False, help=\"\"\"\n Whether the widget will be disabled when rendered. If ``True``,\n the widget will be greyed-out, and not respond to UI events.\n \"\"\")\n\n responsive = Responsive(help=\"\"\"\n The type of responsiveness for the item being displayed. Possible values are\n ``\"fixed\"`` (or ``False``), ``\"scale_width\"`` (or ``True``),\n ``\"scale_height\"``, ``\"scale_both\"``, ``\"stretch_both\"``.\n\n ``\"stretch_both\"`` elements are completely responsive (independently in width and height) and\n will resize to occupy all available space, even if this changes the aspect ratio of the element.\n This is sometimes called outside-in, and is a typical behavior for desktop applications.\n\n ``\"fixed\"`` elements are not responsive. 
They will retain their original width and height\n regardless of any subsequent browser window resize events.\n\n ``\"scale_width\"`` elements will responsively resize to fit to the width available, *while\n maintaining the original aspect ratio*. This is a typical behavior for modern websites. For a\n ``Plot``, the aspect ratio ``plot_width/plot_height`` is maintained.\n\n ``\"scale_height\"`` elements will responsively resize to fit to the height available, *while\n maintaining the original aspect ratio*. For a ``Plot``, the aspect ratio\n ``plot_width/plot_height`` is maintained. A plot with ``\"scale_height\"`` mode needs\n to be wrapped in a ``Row`` or ``Column`` to be responsive.\n\n\n ``\"scale_both\"`` elements will responsively resize to fir both the width and height available,\n *while maintaining the original aspect ratio*.\n\n \"\"\")\n\n # TODO: (mp) Not yet, because it breaks plotting/notebook examples.\n # Rename to _repr_html_ if we decide to enable this by default.\n def __repr_html__(self):\n return notebook_div(self)\n\n @property\n def html(self):\n from IPython.core.display import HTML\n return HTML(self.__repr_html__())\n\n\nclass Spacer(LayoutDOM):\n \"\"\" A container for space used to fill an empty spot in a row or column.\n\n \"\"\"\n\n\nclass WidgetBox(LayoutDOM):\n \"\"\" A container for widgets that are part of a layout.\"\"\"\n def __init__(self, *args, **kwargs):\n if len(args) > 0 and \"children\" in kwargs:\n raise ValueError(\"'children' keyword cannot be used with positional arguments\")\n elif len(args) > 0:\n kwargs[\"children\"] = list(args)\n super(WidgetBox, self).__init__(**kwargs)\n\n @validation.warning(EMPTY_LAYOUT)\n def _check_empty_layout(self):\n from itertools import chain\n if not list(chain(self.children)):\n return str(self)\n\n @validation.warning(BOTH_CHILD_AND_ROOT)\n def _check_child_is_also_root(self):\n problems = []\n for c in self.children:\n if c.document is not None and c in c.document.roots:\n problems.append(str(c))\n if problems:\n return \", \".join(problems)\n else:\n return None\n\n children = List(Instance('bokeh.models.widgets.Widget'), help=\"\"\"\n The list of widgets to put in the layout box.\n \"\"\")\n\n responsive = Override(default='fixed')\n\n\n@abstract\nclass Box(LayoutDOM):\n \"\"\" Abstract base class for Row and Column. 
Do not use directly.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n\n if len(args) > 0 and \"children\" in kwargs:\n raise ValueError(\"'children' keyword cannot be used with positional arguments\")\n elif len(args) > 0:\n kwargs[\"children\"] = list(args)\n\n unwrapped_children = kwargs.get(\"children\", [])\n kwargs[\"children\"] = self._wrap_children(unwrapped_children)\n super(Box, self).__init__(**kwargs)\n\n def _wrap_children(self, children):\n \"\"\" Wrap any Widgets of a list of child layouts in a WidgetBox.\n This allows for the convenience of just spelling Row(button1, button2).\n \"\"\"\n from .widgets.widget import Widget\n wrapped_children = []\n for child in children:\n if isinstance(child, Widget):\n child = WidgetBox(\n children=[child],\n responsive=child.responsive,\n width=child.width,\n height=child.height,\n disabled=child.disabled\n )\n wrapped_children.append(child)\n return wrapped_children\n\n @validation.warning(EMPTY_LAYOUT)\n def _check_empty_layout(self):\n from itertools import chain\n if not list(chain(self.children)):\n return str(self)\n\n @validation.warning(BOTH_CHILD_AND_ROOT)\n def _check_child_is_also_root(self):\n problems = []\n for c in self.children:\n if c.document is not None and c in c.document.roots:\n problems.append(str(c))\n if problems:\n return \", \".join(problems)\n else:\n return None\n\n #TODO Debating the following instead to prevent people adding just a plain\n # widget into a box, which sometimes works and sometimes looks disastrous\n #children = List(\n # Either(\n # Instance('bokeh.models.layouts.Row'),\n # Instance('bokeh.models.layouts.Column'),\n # Instance('bokeh.models.plots.Plot'),\n # Instance('bokeh.models.layouts.WidgetBox')\n # ), help=\"\"\"\n # The list of children, which can be other components including plots, rows, columns, and widgets.\n #\"\"\")\n children = List(Instance(LayoutDOM), help=\"\"\"\n The list of children, which can be other components including plots, rows, columns, and widgets.\n \"\"\")\n\n responsive = Override(default='fixed')\n\n\nclass Row(Box):\n \"\"\" Lay out child components in a single horizontal row.\n\n Children can be specified as positional arguments, as a single argument\n that is a sequence, or using the ``children`` keyword argument.\n \"\"\"\n\n\nclass Column(Box):\n \"\"\" Lay out child components in a single vertical row.\n\n Children can be specified as positional arguments, as a single argument\n that is a sequence, or using the ``children`` keyword argument.\n \"\"\"\n\n\n# ---- DEPRECATIONS\n\n@deprecated(\"Bokeh 0.12.0\", \"bokeh.layouts.gridplot\")\ndef GridPlot(*args, **kwargs):\n from bokeh.layouts import gridplot\n return gridplot(*args, **kwargs)\n\n\n@deprecated(\"Bokeh 0.12.0\", \"bokeh.models.layouts.Row\")\ndef HBox(*args, **kwargs):\n return Row(*args, **kwargs)\n\n\n@deprecated(\"Bokeh 0.12.0\", \"bokeh.models.layouts.Column\")\ndef VBox(*args, **kwargs):\n return Column(*args, **kwargs)\n\n\n@deprecated(\"Bokeh 0.12.0\", \"bokeh.models.layouts.WidgetBox\")\ndef VBoxForm(*args, **kwargs):\n from bokeh.models.widgets.widget import Widget\n\n if len(args) > 0 and \"children\" in kwargs:\n raise ValueError(\"'children' keyword cannot be used with positional arguments\")\n elif len(args) > 0:\n children = list(args)\n else:\n children = kwargs.get(\"children\", [])\n is_widget = [isinstance(item, Widget) for item in children]\n if all(is_widget):\n return WidgetBox(*args, **kwargs)\n else:\n warnings.warn(\n \"\"\"WARNING: Non-widgets added to VBoxForm! 
VBoxForm is deprecated and is\n being replaced with WidgetBox. WidgetBox does not allow you to add non-widgets to it.\n We have transformed your request into a Column, with your Plots and WidgetBox(es) inside\n it. In the future, you will need to update your code to use Row and Column. You may\n find the new bokeh.layouts functions helpful.\n \"\"\")\n return Column(*args, **kwargs)\n", "path": "bokeh/models/layouts.py"}]}
3,126
405
gh_patches_debug_20164
rasdani/github-patches
git_diff
pytorch__vision-2258
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Raise error if target boxes are degenerate in Faster R-CNN We have had a number of reports with users saying that their training loss is nan after a few iterations. Most of the time, this is due to degenerate boxes (i.e., boxes with negative sizes or zero area). We should improve the user experience in those situations. I think that raising an error in `GeneralizedRCNN` if the target boxes are degenerate would be a good compromise. Related issues: https://github.com/pytorch/vision/issues/2235 https://github.com/pytorch/vision/issues/1994 https://github.com/pytorch/vision/issues/1176 https://github.com/pytorch/vision/issues/1128 #1120 and #997 </issue> <code> [start of torchvision/models/detection/generalized_rcnn.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 2 """ 3 Implements the Generalized R-CNN framework 4 """ 5 6 from collections import OrderedDict 7 import torch 8 from torch import nn 9 import warnings 10 from torch.jit.annotations import Tuple, List, Dict, Optional 11 from torch import Tensor 12 13 14 class GeneralizedRCNN(nn.Module): 15 """ 16 Main class for Generalized R-CNN. 17 18 Arguments: 19 backbone (nn.Module): 20 rpn (nn.Module): 21 roi_heads (nn.Module): takes the features + the proposals from the RPN and computes 22 detections / masks from it. 23 transform (nn.Module): performs the data transformation from the inputs to feed into 24 the model 25 """ 26 27 def __init__(self, backbone, rpn, roi_heads, transform): 28 super(GeneralizedRCNN, self).__init__() 29 self.transform = transform 30 self.backbone = backbone 31 self.rpn = rpn 32 self.roi_heads = roi_heads 33 # used only on torchscript mode 34 self._has_warned = False 35 36 @torch.jit.unused 37 def eager_outputs(self, losses, detections): 38 # type: (Dict[str, Tensor], List[Dict[str, Tensor]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]] 39 if self.training: 40 return losses 41 42 return detections 43 44 def forward(self, images, targets=None): 45 # type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]] 46 """ 47 Arguments: 48 images (list[Tensor]): images to be processed 49 targets (list[Dict[Tensor]]): ground-truth boxes present in the image (optional) 50 51 Returns: 52 result (list[BoxList] or dict[Tensor]): the output from the model. 53 During training, it returns a dict[Tensor] which contains the losses. 54 During testing, it returns list[BoxList] contains additional fields 55 like `scores`, `labels` and `mask` (for Mask R-CNN models). 
56 57 """ 58 if self.training and targets is None: 59 raise ValueError("In training mode, targets should be passed") 60 if self.training: 61 assert targets is not None 62 for target in targets: 63 boxes = target["boxes"] 64 if isinstance(boxes, torch.Tensor): 65 if len(boxes.shape) != 2 or boxes.shape[-1] != 4: 66 raise ValueError("Expected target boxes to be a tensor" 67 "of shape [N, 4], got {:}.".format( 68 boxes.shape)) 69 else: 70 raise ValueError("Expected target boxes to be of type " 71 "Tensor, got {:}.".format(type(boxes))) 72 73 original_image_sizes = torch.jit.annotate(List[Tuple[int, int]], []) 74 for img in images: 75 val = img.shape[-2:] 76 assert len(val) == 2 77 original_image_sizes.append((val[0], val[1])) 78 79 images, targets = self.transform(images, targets) 80 features = self.backbone(images.tensors) 81 if isinstance(features, torch.Tensor): 82 features = OrderedDict([('0', features)]) 83 proposals, proposal_losses = self.rpn(images, features, targets) 84 detections, detector_losses = self.roi_heads(features, proposals, images.image_sizes, targets) 85 detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes) 86 87 losses = {} 88 losses.update(detector_losses) 89 losses.update(proposal_losses) 90 91 if torch.jit.is_scripting(): 92 if not self._has_warned: 93 warnings.warn("RCNN always returns a (Losses, Detections) tuple in scripting") 94 self._has_warned = True 95 return (losses, detections) 96 else: 97 return self.eager_outputs(losses, detections) 98 [end of torchvision/models/detection/generalized_rcnn.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py --- a/torchvision/models/detection/generalized_rcnn.py +++ b/torchvision/models/detection/generalized_rcnn.py @@ -77,6 +77,21 @@ original_image_sizes.append((val[0], val[1])) images, targets = self.transform(images, targets) + + # Check for degenerate boxes + # TODO: Move this to a function + if targets is not None: + for target_idx, target in enumerate(targets): + boxes = target["boxes"] + degenerate_boxes = boxes[:, 2:] <= boxes[:, :2] + if degenerate_boxes.any(): + # print the first degenrate box + bb_idx = degenerate_boxes.any(dim=1).nonzero().view(-1)[0] + degen_bb: List[float] = boxes[bb_idx].tolist() + raise ValueError("All bounding boxes should have positive height and width." + " Found invaid box {} for target at index {}." + .format(degen_bb, target_idx)) + features = self.backbone(images.tensors) if isinstance(features, torch.Tensor): features = OrderedDict([('0', features)])
{"golden_diff": "diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py\n--- a/torchvision/models/detection/generalized_rcnn.py\n+++ b/torchvision/models/detection/generalized_rcnn.py\n@@ -77,6 +77,21 @@\n original_image_sizes.append((val[0], val[1]))\n \n images, targets = self.transform(images, targets)\n+\n+ # Check for degenerate boxes\n+ # TODO: Move this to a function\n+ if targets is not None:\n+ for target_idx, target in enumerate(targets):\n+ boxes = target[\"boxes\"]\n+ degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]\n+ if degenerate_boxes.any():\n+ # print the first degenrate box\n+ bb_idx = degenerate_boxes.any(dim=1).nonzero().view(-1)[0]\n+ degen_bb: List[float] = boxes[bb_idx].tolist()\n+ raise ValueError(\"All bounding boxes should have positive height and width.\"\n+ \" Found invaid box {} for target at index {}.\"\n+ .format(degen_bb, target_idx))\n+\n features = self.backbone(images.tensors)\n if isinstance(features, torch.Tensor):\n features = OrderedDict([('0', features)])\n", "issue": "Raise error if target boxes are degenerate in Faster R-CNN\nWe have had a number of reports with users saying that their training loss is nan after a few iterations.\r\n\r\nMost of the time, this is due to degenerate boxes (i.e., boxes with negative sizes or zero area). We should improve the user experience in those situations.\r\n\r\nI think that raising an error in `GeneralizedRCNN` if the target boxes are degenerate would be a good compromise.\r\n\r\nRelated issues: https://github.com/pytorch/vision/issues/2235 https://github.com/pytorch/vision/issues/1994 https://github.com/pytorch/vision/issues/1176 https://github.com/pytorch/vision/issues/1128 #1120 and #997\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved.\n\"\"\"\nImplements the Generalized R-CNN framework\n\"\"\"\n\nfrom collections import OrderedDict\nimport torch\nfrom torch import nn\nimport warnings\nfrom torch.jit.annotations import Tuple, List, Dict, Optional\nfrom torch import Tensor\n\n\nclass GeneralizedRCNN(nn.Module):\n \"\"\"\n Main class for Generalized R-CNN.\n\n Arguments:\n backbone (nn.Module):\n rpn (nn.Module):\n roi_heads (nn.Module): takes the features + the proposals from the RPN and computes\n detections / masks from it.\n transform (nn.Module): performs the data transformation from the inputs to feed into\n the model\n \"\"\"\n\n def __init__(self, backbone, rpn, roi_heads, transform):\n super(GeneralizedRCNN, self).__init__()\n self.transform = transform\n self.backbone = backbone\n self.rpn = rpn\n self.roi_heads = roi_heads\n # used only on torchscript mode\n self._has_warned = False\n\n @torch.jit.unused\n def eager_outputs(self, losses, detections):\n # type: (Dict[str, Tensor], List[Dict[str, Tensor]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]\n if self.training:\n return losses\n\n return detections\n\n def forward(self, images, targets=None):\n # type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]\n \"\"\"\n Arguments:\n images (list[Tensor]): images to be processed\n targets (list[Dict[Tensor]]): ground-truth boxes present in the image (optional)\n\n Returns:\n result (list[BoxList] or dict[Tensor]): the output from the model.\n During training, it returns a dict[Tensor] which contains the losses.\n During testing, it returns list[BoxList] contains additional fields\n like `scores`, `labels` and `mask` (for Mask R-CNN models).\n\n \"\"\"\n if self.training and targets is None:\n raise ValueError(\"In training mode, targets should be passed\")\n if self.training:\n assert targets is not None\n for target in targets:\n boxes = target[\"boxes\"]\n if isinstance(boxes, torch.Tensor):\n if len(boxes.shape) != 2 or boxes.shape[-1] != 4:\n raise ValueError(\"Expected target boxes to be a tensor\"\n \"of shape [N, 4], got {:}.\".format(\n boxes.shape))\n else:\n raise ValueError(\"Expected target boxes to be of type \"\n \"Tensor, got {:}.\".format(type(boxes)))\n\n original_image_sizes = torch.jit.annotate(List[Tuple[int, int]], [])\n for img in images:\n val = img.shape[-2:]\n assert len(val) == 2\n original_image_sizes.append((val[0], val[1]))\n\n images, targets = self.transform(images, targets)\n features = self.backbone(images.tensors)\n if isinstance(features, torch.Tensor):\n features = OrderedDict([('0', features)])\n proposals, proposal_losses = self.rpn(images, features, targets)\n detections, detector_losses = self.roi_heads(features, proposals, images.image_sizes, targets)\n detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)\n\n losses = {}\n losses.update(detector_losses)\n losses.update(proposal_losses)\n\n if torch.jit.is_scripting():\n if not self._has_warned:\n warnings.warn(\"RCNN always returns a (Losses, Detections) tuple in scripting\")\n self._has_warned = True\n return (losses, detections)\n else:\n return self.eager_outputs(losses, detections)\n", "path": "torchvision/models/detection/generalized_rcnn.py"}]}
1,724
286
gh_patches_debug_1514
rasdani/github-patches
git_diff
ocadotechnology__aimmo-543
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Latest minikube not starting on Travis CI Same issue and hopefully fix as this https://github.com/kubernetes/minikube/issues/2704 </issue> <code> [start of setup.py] 1 # -*- coding: utf-8 -*- 2 from setuptools import find_packages, setup 3 4 import versioneer 5 6 setup( 7 name='aimmo', 8 cmdclass=versioneer.get_cmdclass(), 9 packages=find_packages(), 10 include_package_data=True, 11 install_requires=[ 12 'django >= 1.8.3, < 1.9.0', 13 'django-autoconfig >= 0.3.6, < 1.0.0', 14 'django-forms-bootstrap', 15 'django-js-reverse', 16 'eventlet', 17 'flask', 18 'flask-socketio', 19 'requests', 20 'six', 21 'pykube', 22 'hypothesis', 23 'flask-cors >= 3.0, < 3.1', 24 'psutil >= 5.4, < 5.5', 25 ], 26 tests_require=[ 27 'django-setuptest', 28 'httmock', 29 'mock == 2.0.0', 30 'docker == 2.7.0', 31 'kubernetes == 4.0.0', 32 'PyYAML == 3.12', 33 ], 34 test_suite='setuptest.setuptest.SetupTestSuite', 35 version=versioneer.get_version(), 36 zip_safe=False, 37 ) 38 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -28,7 +28,7 @@ 'httmock', 'mock == 2.0.0', 'docker == 2.7.0', - 'kubernetes == 4.0.0', + 'kubernetes == 5.0.0', 'PyYAML == 3.12', ], test_suite='setuptest.setuptest.SetupTestSuite',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -28,7 +28,7 @@\n 'httmock',\n 'mock == 2.0.0',\n 'docker == 2.7.0',\n- 'kubernetes == 4.0.0',\n+ 'kubernetes == 5.0.0',\n 'PyYAML == 3.12',\n ],\n test_suite='setuptest.setuptest.SetupTestSuite',\n", "issue": "Latest minikube not starting on Travis CI\nSame issue and hopefully fix as this https://github.com/kubernetes/minikube/issues/2704\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\nsetup(\n name='aimmo',\n cmdclass=versioneer.get_cmdclass(),\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n 'django >= 1.8.3, < 1.9.0',\n 'django-autoconfig >= 0.3.6, < 1.0.0',\n 'django-forms-bootstrap',\n 'django-js-reverse',\n 'eventlet',\n 'flask',\n 'flask-socketio',\n 'requests',\n 'six',\n 'pykube',\n 'hypothesis',\n 'flask-cors >= 3.0, < 3.1',\n 'psutil >= 5.4, < 5.5',\n ],\n tests_require=[\n 'django-setuptest',\n 'httmock',\n 'mock == 2.0.0',\n 'docker == 2.7.0',\n 'kubernetes == 4.0.0',\n 'PyYAML == 3.12',\n ],\n test_suite='setuptest.setuptest.SetupTestSuite',\n version=versioneer.get_version(),\n zip_safe=False,\n)\n", "path": "setup.py"}]}
903
114
gh_patches_debug_1492
rasdani/github-patches
git_diff
wright-group__WrightTools-590
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Change __version__ to match pep 440 Specifically, when a branch is specified, it should use a plus sign instead of minus https://www.python.org/dev/peps/pep-0440/#local-version-identifiers https://github.com/wright-group/WrightTools/blob/490a4a3d6fb6f016e7033d661b553b72c2d86fcb/WrightTools/__version__.py#L33 </issue> <code> [start of WrightTools/__version__.py] 1 """Define WrightTools version.""" 2 3 4 # --- import -------------------------------------------------------------------------------------- 5 6 7 import os 8 9 10 # ---- define ------------------------------------------------------------------------------------- 11 12 13 here = os.path.abspath(os.path.dirname(__file__)) 14 15 16 __all__ = ['__version__', '__branch__'] 17 18 19 # --- version ------------------------------------------------------------------------------------- 20 21 22 # read from VERSION file 23 with open(os.path.join(os.path.dirname(here), 'VERSION')) as f: 24 __version__ = f.read().strip() 25 26 27 # add git branch, if appropriate 28 p = os.path.join(os.path.dirname(here), '.git', 'HEAD') 29 if os.path.isfile(p): 30 with open(p) as f: 31 __branch__ = f.readline().rstrip().split(r'/')[-1] 32 if __branch__ != 'master': 33 __version__ += '-' + __branch__ 34 else: 35 __branch__ = None 36 [end of WrightTools/__version__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/WrightTools/__version__.py b/WrightTools/__version__.py --- a/WrightTools/__version__.py +++ b/WrightTools/__version__.py @@ -30,6 +30,6 @@ with open(p) as f: __branch__ = f.readline().rstrip().split(r'/')[-1] if __branch__ != 'master': - __version__ += '-' + __branch__ + __version__ += '+' + __branch__ else: __branch__ = None
{"golden_diff": "diff --git a/WrightTools/__version__.py b/WrightTools/__version__.py\n--- a/WrightTools/__version__.py\n+++ b/WrightTools/__version__.py\n@@ -30,6 +30,6 @@\n with open(p) as f:\n __branch__ = f.readline().rstrip().split(r'/')[-1]\n if __branch__ != 'master':\n- __version__ += '-' + __branch__\n+ __version__ += '+' + __branch__\n else:\n __branch__ = None\n", "issue": "Change __version__ to match pep 440\nSpecifically, when a branch is specified, it should use a plus sign instead of minus\r\n\r\nhttps://www.python.org/dev/peps/pep-0440/#local-version-identifiers\r\n\r\nhttps://github.com/wright-group/WrightTools/blob/490a4a3d6fb6f016e7033d661b553b72c2d86fcb/WrightTools/__version__.py#L33\n", "before_files": [{"content": "\"\"\"Define WrightTools version.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport os\n\n\n# ---- define -------------------------------------------------------------------------------------\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\n__all__ = ['__version__', '__branch__']\n\n\n# --- version -------------------------------------------------------------------------------------\n\n\n# read from VERSION file\nwith open(os.path.join(os.path.dirname(here), 'VERSION')) as f:\n __version__ = f.read().strip()\n\n\n# add git branch, if appropriate\np = os.path.join(os.path.dirname(here), '.git', 'HEAD')\nif os.path.isfile(p):\n with open(p) as f:\n __branch__ = f.readline().rstrip().split(r'/')[-1]\n if __branch__ != 'master':\n __version__ += '-' + __branch__\nelse:\n __branch__ = None\n", "path": "WrightTools/__version__.py"}]}
897
117
gh_patches_debug_30334
rasdani/github-patches
git_diff
Lightning-AI__pytorch-lightning-1360
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> WandbLogger cannot be used with 'ddp' <!-- ### Common bugs: 1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). 2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) --> ## 🐛 Bug wandb modifies `init` such that a child process calling init returns None if the master process has called init. This seems to cause a bug with ddp, and results in rank zero having experiment = None, which crashes the program. ### To Reproduce Can be reproduced with the basic MNIST gpu template, simply add a WandbLogger and pass 'ddp' as the distributed backend. ``` -- Process 0 terminated with the following error: Traceback (most recent call last): File "/home/rmrao/anaconda3/lib/python3.6/site-packages/torch/multiprocessing/spawn.py", line 19, in _wrap fn(i, *args) File "/home/rmrao/anaconda3/lib/python3.6/site-packages/pytorch_lightning/trainer/distrib_data_parallel.py", line 331, in ddp_train self.run_pretrain_routine(model) File "/home/rmrao/anaconda3/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 757, in run_pretrain_routine self.logger.log_hyperparams(ref_model.hparams) File "/home/rmrao/anaconda3/lib/python3.6/site-packages/pytorch_lightning/logging/base.py", line 14, in wrapped_fn fn(self, *args, **kwargs) File "/home/rmrao/anaconda3/lib/python3.6/site-packages/pytorch_lightning/logging/wandb.py", line 79, in log_hyperparams self.experiment.config.update(params) AttributeError: 'NoneType' object has no attribute 'config' ``` This occurs with the latest wandb version and with pytorch-lightning 0.6. </issue> <code> [start of pytorch_lightning/loggers/wandb.py] 1 r""" 2 3 .. _wandb: 4 5 WandbLogger 6 ------------- 7 """ 8 import os 9 from argparse import Namespace 10 from typing import Optional, List, Dict, Union, Any 11 12 import torch.nn as nn 13 14 try: 15 import wandb 16 from wandb.wandb_run import Run 17 except ImportError: # pragma: no-cover 18 raise ImportError('You want to use `wandb` logger which is not installed yet,' # pragma: no-cover 19 ' install it with `pip install wandb`.') 20 21 from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_only 22 23 24 class WandbLogger(LightningLoggerBase): 25 """ 26 Logger for `W&B <https://www.wandb.com/>`_. 27 28 Args: 29 name (str): display name for the run. 30 save_dir (str): path where data is saved. 31 offline (bool): run offline (data can be streamed later to wandb servers). 32 id or version (str): sets the version, mainly used to resume a previous run. 33 anonymous (bool): enables or explicitly disables anonymous logging. 34 project (str): the name of the project to which this run will belong. 35 tags (list of str): tags associated with this run. 36 log_model (bool): save checkpoints in wandb dir to upload on W&B servers. 37 38 Example 39 -------- 40 .. 
code-block:: python 41 42 from pytorch_lightning.loggers import WandbLogger 43 from pytorch_lightning import Trainer 44 45 wandb_logger = WandbLogger() 46 trainer = Trainer(logger=wandb_logger) 47 """ 48 49 def __init__(self, name: Optional[str] = None, save_dir: Optional[str] = None, 50 offline: bool = False, id: Optional[str] = None, anonymous: bool = False, 51 version: Optional[str] = None, project: Optional[str] = None, 52 tags: Optional[List[str]] = None, log_model: bool = False, 53 experiment=None, entity=None): 54 super().__init__() 55 self._name = name 56 self._save_dir = save_dir 57 self._anonymous = 'allow' if anonymous else None 58 self._id = version or id 59 self._tags = tags 60 self._project = project 61 self._experiment = experiment 62 self._offline = offline 63 self._entity = entity 64 self._log_model = log_model 65 66 def __getstate__(self): 67 state = self.__dict__.copy() 68 # cannot be pickled 69 state['_experiment'] = None 70 # args needed to reload correct experiment 71 state['_id'] = self.experiment.id 72 return state 73 74 @property 75 def experiment(self) -> Run: 76 r""" 77 78 Actual wandb object. To use wandb features do the following. 79 80 Example:: 81 82 self.logger.experiment.some_wandb_function() 83 84 """ 85 if self._experiment is None: 86 if self._offline: 87 os.environ['WANDB_MODE'] = 'dryrun' 88 self._experiment = wandb.init( 89 name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous, 90 id=self._id, resume='allow', tags=self._tags, entity=self._entity) 91 # save checkpoints in wandb dir to upload on W&B servers 92 if self._log_model: 93 self.save_dir = self._experiment.dir 94 return self._experiment 95 96 def watch(self, model: nn.Module, log: str = 'gradients', log_freq: int = 100): 97 wandb.watch(model, log=log, log_freq=log_freq) 98 99 @rank_zero_only 100 def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None: 101 params = self._convert_params(params) 102 self.experiment.config.update(params) 103 104 @rank_zero_only 105 def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None: 106 if step is not None: 107 metrics['global_step'] = step 108 self.experiment.log(metrics) 109 110 @property 111 def name(self) -> str: 112 return self.experiment.project_name() 113 114 @property 115 def version(self) -> str: 116 return self.experiment.id 117 [end of pytorch_lightning/loggers/wandb.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pytorch_lightning/loggers/wandb.py b/pytorch_lightning/loggers/wandb.py --- a/pytorch_lightning/loggers/wandb.py +++ b/pytorch_lightning/loggers/wandb.py @@ -65,10 +65,11 @@ def __getstate__(self): state = self.__dict__.copy() + # args needed to reload correct experiment + state['_id'] = self._experiment.id if self._experiment is not None else None + # cannot be pickled state['_experiment'] = None - # args needed to reload correct experiment - state['_id'] = self.experiment.id return state @property @@ -87,7 +88,7 @@ os.environ['WANDB_MODE'] = 'dryrun' self._experiment = wandb.init( name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous, - id=self._id, resume='allow', tags=self._tags, entity=self._entity) + reinit=True, id=self._id, resume='allow', tags=self._tags, entity=self._entity) # save checkpoints in wandb dir to upload on W&B servers if self._log_model: self.save_dir = self._experiment.dir @@ -109,8 +110,11 @@ @property def name(self) -> str: - return self.experiment.project_name() + # don't create an experiment if we don't have one + name = self._experiment.project_name() if self._experiment else None + return name @property def version(self) -> str: - return self.experiment.id + # don't create an experiment if we don't have one + return self._experiment.id if self._experiment else None
{"golden_diff": "diff --git a/pytorch_lightning/loggers/wandb.py b/pytorch_lightning/loggers/wandb.py\n--- a/pytorch_lightning/loggers/wandb.py\n+++ b/pytorch_lightning/loggers/wandb.py\n@@ -65,10 +65,11 @@\n \n def __getstate__(self):\n state = self.__dict__.copy()\n+ # args needed to reload correct experiment\n+ state['_id'] = self._experiment.id if self._experiment is not None else None\n+\n # cannot be pickled\n state['_experiment'] = None\n- # args needed to reload correct experiment\n- state['_id'] = self.experiment.id\n return state\n \n @property\n@@ -87,7 +88,7 @@\n os.environ['WANDB_MODE'] = 'dryrun'\n self._experiment = wandb.init(\n name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous,\n- id=self._id, resume='allow', tags=self._tags, entity=self._entity)\n+ reinit=True, id=self._id, resume='allow', tags=self._tags, entity=self._entity)\n # save checkpoints in wandb dir to upload on W&B servers\n if self._log_model:\n self.save_dir = self._experiment.dir\n@@ -109,8 +110,11 @@\n \n @property\n def name(self) -> str:\n- return self.experiment.project_name()\n+ # don't create an experiment if we don't have one\n+ name = self._experiment.project_name() if self._experiment else None\n+ return name\n \n @property\n def version(self) -> str:\n- return self.experiment.id\n+ # don't create an experiment if we don't have one\n+ return self._experiment.id if self._experiment else None\n", "issue": "WandbLogger cannot be used with 'ddp'\n<!-- \r\n### Common bugs:\r\n1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). \r\n2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) \r\n-->\r\n\r\n## \ud83d\udc1b Bug\r\n\r\nwandb modifies `init` such that a child process calling init returns None if the master process has called init. This seems to cause a bug with ddp, and results in rank zero having experiment = None, which crashes the program.\r\n\r\n### To Reproduce\r\n\r\nCan be reproduced with the basic MNIST gpu template, simply add a WandbLogger and pass 'ddp' as the distributed backend.\r\n```\r\n-- Process 0 terminated with the following error:\r\nTraceback (most recent call last):\r\n File \"/home/rmrao/anaconda3/lib/python3.6/site-packages/torch/multiprocessing/spawn.py\", line 19, in _wrap\r\n fn(i, *args)\r\n File \"/home/rmrao/anaconda3/lib/python3.6/site-packages/pytorch_lightning/trainer/distrib_data_parallel.py\", line 331, in ddp_train\r\n self.run_pretrain_routine(model)\r\n File \"/home/rmrao/anaconda3/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py\", line 757, in run_pretrain_routine\r\n self.logger.log_hyperparams(ref_model.hparams)\r\n File \"/home/rmrao/anaconda3/lib/python3.6/site-packages/pytorch_lightning/logging/base.py\", line 14, in wrapped_fn\r\n fn(self, *args, **kwargs)\r\n File \"/home/rmrao/anaconda3/lib/python3.6/site-packages/pytorch_lightning/logging/wandb.py\", line 79, in log_hyperparams\r\n self.experiment.config.update(params)\r\nAttributeError: 'NoneType' object has no attribute 'config'\r\n```\r\n\r\nThis occurs with the latest wandb version and with pytorch-lightning 0.6.\r\n\n", "before_files": [{"content": "r\"\"\"\n\n.. 
_wandb:\n\nWandbLogger\n-------------\n\"\"\"\nimport os\nfrom argparse import Namespace\nfrom typing import Optional, List, Dict, Union, Any\n\nimport torch.nn as nn\n\ntry:\n import wandb\n from wandb.wandb_run import Run\nexcept ImportError: # pragma: no-cover\n raise ImportError('You want to use `wandb` logger which is not installed yet,' # pragma: no-cover\n ' install it with `pip install wandb`.')\n\nfrom pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_only\n\n\nclass WandbLogger(LightningLoggerBase):\n \"\"\"\n Logger for `W&B <https://www.wandb.com/>`_.\n\n Args:\n name (str): display name for the run.\n save_dir (str): path where data is saved.\n offline (bool): run offline (data can be streamed later to wandb servers).\n id or version (str): sets the version, mainly used to resume a previous run.\n anonymous (bool): enables or explicitly disables anonymous logging.\n project (str): the name of the project to which this run will belong.\n tags (list of str): tags associated with this run.\n log_model (bool): save checkpoints in wandb dir to upload on W&B servers.\n\n Example\n --------\n .. code-block:: python\n\n from pytorch_lightning.loggers import WandbLogger\n from pytorch_lightning import Trainer\n\n wandb_logger = WandbLogger()\n trainer = Trainer(logger=wandb_logger)\n \"\"\"\n\n def __init__(self, name: Optional[str] = None, save_dir: Optional[str] = None,\n offline: bool = False, id: Optional[str] = None, anonymous: bool = False,\n version: Optional[str] = None, project: Optional[str] = None,\n tags: Optional[List[str]] = None, log_model: bool = False,\n experiment=None, entity=None):\n super().__init__()\n self._name = name\n self._save_dir = save_dir\n self._anonymous = 'allow' if anonymous else None\n self._id = version or id\n self._tags = tags\n self._project = project\n self._experiment = experiment\n self._offline = offline\n self._entity = entity\n self._log_model = log_model\n\n def __getstate__(self):\n state = self.__dict__.copy()\n # cannot be pickled\n state['_experiment'] = None\n # args needed to reload correct experiment\n state['_id'] = self.experiment.id\n return state\n\n @property\n def experiment(self) -> Run:\n r\"\"\"\n\n Actual wandb object. To use wandb features do the following.\n\n Example::\n\n self.logger.experiment.some_wandb_function()\n\n \"\"\"\n if self._experiment is None:\n if self._offline:\n os.environ['WANDB_MODE'] = 'dryrun'\n self._experiment = wandb.init(\n name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous,\n id=self._id, resume='allow', tags=self._tags, entity=self._entity)\n # save checkpoints in wandb dir to upload on W&B servers\n if self._log_model:\n self.save_dir = self._experiment.dir\n return self._experiment\n\n def watch(self, model: nn.Module, log: str = 'gradients', log_freq: int = 100):\n wandb.watch(model, log=log, log_freq=log_freq)\n\n @rank_zero_only\n def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:\n params = self._convert_params(params)\n self.experiment.config.update(params)\n\n @rank_zero_only\n def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:\n if step is not None:\n metrics['global_step'] = step\n self.experiment.log(metrics)\n\n @property\n def name(self) -> str:\n return self.experiment.project_name()\n\n @property\n def version(self) -> str:\n return self.experiment.id\n", "path": "pytorch_lightning/loggers/wandb.py"}]}
2,198
419
gh_patches_debug_19325
rasdani/github-patches
git_diff
qtile__qtile-2290
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> WidgetBox does not fully re-expand on click # Issue description On first click, WidgetBox expands properly, but clicking a second time does not hide all widgets. Specifically, in my configuration (below), the Wallpaper is shown/hidden properly, but the Systray does not hide again. # Qtile version qtile-0.17.1.dev82+geb8722ec # Stack traces no errors in qtile.log # Configuration ``` widget.WidgetBox(widgets=[ widget.Wallpaper(directory='~/Downloads/wallpaper/', wallpaper_command=None, random_selection=True, label='\u0394'), widget.Systray(padding=1, icon_size=22), ], close_button_location='right', ), ``` </issue> <code> [start of libqtile/widget/widgetbox.py] 1 # Copyright (c) 2020 elParaguayo 2 # 3 # Permission is hereby granted, free of charge, to any person obtaining a copy 4 # of this software and associated documentation files (the "Software"), to deal 5 # in the Software without restriction, including without limitation the rights 6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 # copies of the Software, and to permit persons to whom the Software is 8 # furnished to do so, subject to the following conditions: 9 # 10 # The above copyright notice and this permission notice shall be included in 11 # all copies or substantial portions of the Software. 12 # 13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 # SOFTWARE. 20 21 from collections import namedtuple 22 23 from libqtile import bar 24 from libqtile.log_utils import logger 25 from libqtile.widget import base 26 27 BoxedWidget = namedtuple("BoxedWidget", ["widget", "draw"]) 28 29 30 def _no_draw(*args, **kwargs): 31 pass 32 33 34 class WidgetBox(base._Widget): 35 """A widget to declutter your bar. 36 37 WidgetBox is a widget that hides widgets by default but shows them when 38 the box is opened. 39 40 Widgets that are hidden will still update etc. as if they were on the main 41 bar. 42 43 Button clicks are passed to widgets when they are visible so callbacks will 44 work. 45 46 Widgets in the box also remain accessible via command interfaces. 47 48 Widgets can only be added to the box via the configuration file. The widget 49 is configured by adding widgets to the "widgets" parameter as follows:: 50 51 widget.WidgetBox(widgets=[ 52 widget.TextBox(text="This widget is in the box"), 53 widget.Memory() 54 ] 55 ), 56 """ 57 orientations = base.ORIENTATION_HORIZONTAL 58 defaults = [ 59 ( 60 "font", 61 "sans", 62 "Text font" 63 ), 64 ( 65 "fontsize", 66 None, 67 "Font pixel size. Calculated if None." 68 ), 69 ( 70 "fontshadow", 71 None, 72 "font shadow color, default is None(no shadow)" 73 ), 74 ( 75 "foreground", 76 "#ffffff", 77 "Foreground colour." 
78 ), 79 ( 80 "close_button_location", 81 "left", 82 "Location of close button when box open ('left' or 'right')" 83 ), 84 ( 85 "text_closed", 86 "[<]", 87 "Text when box is closed" 88 ), 89 ( 90 "text_open", 91 "[>]", 92 "Text when box is open" 93 ), 94 ] 95 96 def __init__(self, widgets=list(), **config): 97 base._Widget.__init__(self, bar.CALCULATED, **config) 98 self.add_defaults(WidgetBox.defaults) 99 self.box_is_open = False 100 self._widgets = widgets 101 self.add_callbacks({"Button1": self.cmd_toggle}) 102 103 if self.close_button_location not in ["left", "right"]: 104 val = self.close_button_location 105 msg = "Invalid value for 'close_button_location': {}".format(val) 106 logger.warning(msg) 107 self.close_button_location = "left" 108 109 def _configure(self, qtile, bar): 110 base._Widget._configure(self, qtile, bar) 111 112 self.layout = self.drawer.textlayout( 113 self.text_closed, 114 self.foreground, 115 self.font, 116 self.fontsize, 117 self.fontshadow, 118 markup=False, 119 ) 120 121 for idx, w in enumerate(self._widgets): 122 if w.configured: 123 w = w.create_mirror() 124 self._widgets[idx] = w 125 self.qtile.register_widget(w) 126 w._configure(self.qtile, self.bar) 127 128 # In case the widget is mirrored, we need to draw it once so the 129 # mirror can copy the surface but draw it off screen 130 w.offsetx = self.bar.width 131 self.qtile.call_soon(w.draw) 132 133 # We need to stop hidden widgets from drawing while hidden 134 # (e.g. draw could be triggered by a timer) so we take a reference to 135 # the widget's drawer.draw method 136 self.widgets = [BoxedWidget(w, w.drawer.draw) for w in self._widgets] 137 138 # # Overwrite the current drawer.draw method with a no-op 139 for w in self.widgets: 140 w.widget.drawer.draw = _no_draw 141 142 def calculate_length(self): 143 return self.layout.width 144 145 def set_box_label(self): 146 self.layout.text = (self.text_open if self.box_is_open 147 else self.text_closed) 148 149 def toggle_widgets(self): 150 for item in self.widgets: 151 try: 152 self.bar.widgets.remove(item.widget) 153 # Override drawer.drawer with a no-op 154 item.widget.drawer.draw = _no_draw 155 except ValueError: 156 continue 157 158 index = self.bar.widgets.index(self) 159 160 if self.close_button_location == "left": 161 index += 1 162 163 if self.box_is_open: 164 165 # Need to reverse list as widgets get added in front of eachother. 166 for item in self.widgets[::-1]: 167 # Restore the original drawer.draw method 168 item.widget.drawer.draw = item.draw 169 self.bar.widgets.insert(index, item.widget) 170 171 def draw(self): 172 self.drawer.clear(self.background or self.bar.background) 173 174 self.layout.draw(0, 175 int(self.bar.height / 2.0 - 176 self.layout.height / 2.0) + 1) 177 178 self.drawer.draw(offsetx=self.offsetx, width=self.width) 179 180 def button_press(self, x, y, button): 181 name = "Button{}".format(button) 182 if name in self.mouse_callbacks: 183 self.mouse_callbacks[name]() 184 185 def cmd_toggle(self): 186 """Toggle box state""" 187 self.box_is_open = not self.box_is_open 188 self.toggle_widgets() 189 self.set_box_label() 190 self.bar.draw() 191 [end of libqtile/widget/widgetbox.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libqtile/widget/widgetbox.py b/libqtile/widget/widgetbox.py --- a/libqtile/widget/widgetbox.py +++ b/libqtile/widget/widgetbox.py @@ -22,7 +22,7 @@ from libqtile import bar from libqtile.log_utils import logger -from libqtile.widget import base +from libqtile.widget import Systray, base BoxedWidget = namedtuple("BoxedWidget", ["widget", "draw"]) @@ -152,6 +152,14 @@ self.bar.widgets.remove(item.widget) # Override drawer.drawer with a no-op item.widget.drawer.draw = _no_draw + + # Systray widget needs some additional steps to hide as the icons + # are separate _Window instances. + # Systray unhides icons when it draws so we only need to hide them. + if isinstance(item.widget, Systray): + for icon in item.widget.icons.values(): + icon.hide() + except ValueError: continue
{"golden_diff": "diff --git a/libqtile/widget/widgetbox.py b/libqtile/widget/widgetbox.py\n--- a/libqtile/widget/widgetbox.py\n+++ b/libqtile/widget/widgetbox.py\n@@ -22,7 +22,7 @@\n \n from libqtile import bar\n from libqtile.log_utils import logger\n-from libqtile.widget import base\n+from libqtile.widget import Systray, base\n \n BoxedWidget = namedtuple(\"BoxedWidget\", [\"widget\", \"draw\"])\n \n@@ -152,6 +152,14 @@\n self.bar.widgets.remove(item.widget)\n # Override drawer.drawer with a no-op\n item.widget.drawer.draw = _no_draw\n+\n+ # Systray widget needs some additional steps to hide as the icons\n+ # are separate _Window instances.\n+ # Systray unhides icons when it draws so we only need to hide them.\n+ if isinstance(item.widget, Systray):\n+ for icon in item.widget.icons.values():\n+ icon.hide()\n+\n except ValueError:\n continue\n", "issue": "WidgetBox does not fully re-expand on click\n# Issue description\r\n\r\nOn first click, WidgetBox expands properly, but clicking a second time does not hide all widgets.\r\n\r\nSpecifically, in my configuration (below), the Wallpaper is shown/hidden properly, but the Systray does not hide again.\r\n\r\n# Qtile version\r\n\r\nqtile-0.17.1.dev82+geb8722ec\r\n\r\n# Stack traces\r\n\r\nno errors in qtile.log\r\n\r\n# Configuration\r\n\r\n```\r\n widget.WidgetBox(widgets=[\r\n widget.Wallpaper(directory='~/Downloads/wallpaper/', wallpaper_command=None, random_selection=True, label='\\u0394'),\r\n widget.Systray(padding=1, icon_size=22),\r\n ], close_button_location='right',\r\n ),\r\n```\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) 2020 elParaguayo\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom collections import namedtuple\n\nfrom libqtile import bar\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\nBoxedWidget = namedtuple(\"BoxedWidget\", [\"widget\", \"draw\"])\n\n\ndef _no_draw(*args, **kwargs):\n pass\n\n\nclass WidgetBox(base._Widget):\n \"\"\"A widget to declutter your bar.\n\n WidgetBox is a widget that hides widgets by default but shows them when\n the box is opened.\n\n Widgets that are hidden will still update etc. as if they were on the main\n bar.\n\n Button clicks are passed to widgets when they are visible so callbacks will\n work.\n\n Widgets in the box also remain accessible via command interfaces.\n\n Widgets can only be added to the box via the configuration file. 
The widget\n is configured by adding widgets to the \"widgets\" parameter as follows::\n\n widget.WidgetBox(widgets=[\n widget.TextBox(text=\"This widget is in the box\"),\n widget.Memory()\n ]\n ),\n \"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n (\n \"font\",\n \"sans\",\n \"Text font\"\n ),\n (\n \"fontsize\",\n None,\n \"Font pixel size. Calculated if None.\"\n ),\n (\n \"fontshadow\",\n None,\n \"font shadow color, default is None(no shadow)\"\n ),\n (\n \"foreground\",\n \"#ffffff\",\n \"Foreground colour.\"\n ),\n (\n \"close_button_location\",\n \"left\",\n \"Location of close button when box open ('left' or 'right')\"\n ),\n (\n \"text_closed\",\n \"[<]\",\n \"Text when box is closed\"\n ),\n (\n \"text_open\",\n \"[>]\",\n \"Text when box is open\"\n ),\n ]\n\n def __init__(self, widgets=list(), **config):\n base._Widget.__init__(self, bar.CALCULATED, **config)\n self.add_defaults(WidgetBox.defaults)\n self.box_is_open = False\n self._widgets = widgets\n self.add_callbacks({\"Button1\": self.cmd_toggle})\n\n if self.close_button_location not in [\"left\", \"right\"]:\n val = self.close_button_location\n msg = \"Invalid value for 'close_button_location': {}\".format(val)\n logger.warning(msg)\n self.close_button_location = \"left\"\n\n def _configure(self, qtile, bar):\n base._Widget._configure(self, qtile, bar)\n\n self.layout = self.drawer.textlayout(\n self.text_closed,\n self.foreground,\n self.font,\n self.fontsize,\n self.fontshadow,\n markup=False,\n )\n\n for idx, w in enumerate(self._widgets):\n if w.configured:\n w = w.create_mirror()\n self._widgets[idx] = w\n self.qtile.register_widget(w)\n w._configure(self.qtile, self.bar)\n\n # In case the widget is mirrored, we need to draw it once so the\n # mirror can copy the surface but draw it off screen\n w.offsetx = self.bar.width\n self.qtile.call_soon(w.draw)\n\n # We need to stop hidden widgets from drawing while hidden\n # (e.g. 
draw could be triggered by a timer) so we take a reference to\n # the widget's drawer.draw method\n self.widgets = [BoxedWidget(w, w.drawer.draw) for w in self._widgets]\n\n # # Overwrite the current drawer.draw method with a no-op\n for w in self.widgets:\n w.widget.drawer.draw = _no_draw\n\n def calculate_length(self):\n return self.layout.width\n\n def set_box_label(self):\n self.layout.text = (self.text_open if self.box_is_open\n else self.text_closed)\n\n def toggle_widgets(self):\n for item in self.widgets:\n try:\n self.bar.widgets.remove(item.widget)\n # Override drawer.drawer with a no-op\n item.widget.drawer.draw = _no_draw\n except ValueError:\n continue\n\n index = self.bar.widgets.index(self)\n\n if self.close_button_location == \"left\":\n index += 1\n\n if self.box_is_open:\n\n # Need to reverse list as widgets get added in front of eachother.\n for item in self.widgets[::-1]:\n # Restore the original drawer.draw method\n item.widget.drawer.draw = item.draw\n self.bar.widgets.insert(index, item.widget)\n\n def draw(self):\n self.drawer.clear(self.background or self.bar.background)\n\n self.layout.draw(0,\n int(self.bar.height / 2.0 -\n self.layout.height / 2.0) + 1)\n\n self.drawer.draw(offsetx=self.offsetx, width=self.width)\n\n def button_press(self, x, y, button):\n name = \"Button{}\".format(button)\n if name in self.mouse_callbacks:\n self.mouse_callbacks[name]()\n\n def cmd_toggle(self):\n \"\"\"Toggle box state\"\"\"\n self.box_is_open = not self.box_is_open\n self.toggle_widgets()\n self.set_box_label()\n self.bar.draw()\n", "path": "libqtile/widget/widgetbox.py"}]}
2,510
231
gh_patches_debug_24345
rasdani/github-patches
git_diff
cobbler__cobbler-3650
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Backport] Add support for cnames in dnsmasq ### Original feature issue - Issue: #1643 - PR: #3640 ### Target release - [x] release33 - [ ] release32 - [ ] release30 ### Reason Stabilization of Cobbler 3.3.5 </issue> <code> [start of cobbler/modules/managers/dnsmasq.py] 1 """ 2 This is some of the code behind 'cobbler sync'. 3 4 Copyright 2006-2009, Red Hat, Inc and Others 5 Michael DeHaan <michael.dehaan AT gmail> 6 John Eckersberg <jeckersb@redhat.com> 7 8 This program is free software; you can redistribute it and/or modify 9 it under the terms of the GNU General Public License as published by 10 the Free Software Foundation; either version 2 of the License, or 11 (at your option) any later version. 12 13 This program is distributed in the hope that it will be useful, 14 but WITHOUT ANY WARRANTY; without even the implied warranty of 15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 GNU General Public License for more details. 17 18 You should have received a copy of the GNU General Public License 19 along with this program; if not, write to the Free Software 20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 21 02110-1301 USA 22 """ 23 24 import time 25 26 import cobbler.utils as utils 27 from cobbler.manager import ManagerModule 28 29 MANAGER = None 30 31 32 def register() -> str: 33 """ 34 The mandatory Cobbler modules registration hook. 35 36 :return: Always "manage". 37 """ 38 return "manage" 39 40 41 class _DnsmasqManager(ManagerModule): 42 """ 43 Handles conversion of internal state to the tftpboot tree layout. 44 """ 45 46 @staticmethod 47 def what() -> str: 48 """ 49 This identifies the module. 50 51 :return: Will always return ``dnsmasq``. 52 """ 53 return "dnsmasq" 54 55 def write_configs(self): 56 """ 57 DHCP files are written when ``manage_dhcp`` is set in our settings. 58 59 :raises OSError 60 """ 61 62 settings_file = "/etc/dnsmasq.conf" 63 template_file = "/etc/cobbler/dnsmasq.template" 64 65 try: 66 f2 = open(template_file, "r") 67 except Exception: 68 raise OSError("error writing template to file: %s" % template_file) 69 template_data = f2.read() 70 f2.close() 71 72 system_definitions = {} 73 74 # we used to just loop through each system, but now we must loop 75 # through each network interface of each system. 76 77 for system in self.systems: 78 79 if not system.is_management_supported(cidr_ok=False): 80 continue 81 82 profile = system.get_conceptual_parent() 83 distro = profile.get_conceptual_parent() 84 for interface in system.interfaces.values(): 85 86 mac = interface.mac_address 87 ip = interface.ip_address 88 host = interface.dns_name 89 ipv6 = interface.ipv6_address 90 91 if not mac: 92 # can't write a DHCP entry for this system 93 continue 94 95 # In many reallife situations there is a need to control the IP address and hostname for a specific 96 # client when only the MAC address is available. In addition to that in some scenarios there is a need 97 # to explicitly label a host with the applicable architecture in order to correctly handle situations 98 # where we need something other than ``pxelinux.0``. So we always write a dhcp-host entry with as much 99 # info as possible to allow maximum control and flexibility within the dnsmasq config. 
100 101 systxt = "dhcp-host=net:" + distro.arch.value.lower() + "," + mac 102 103 if host != "": 104 systxt += "," + host 105 106 if ip != "": 107 systxt += "," + ip 108 if ipv6 != "": 109 systxt += ",[%s]" % ipv6 110 111 systxt += "\n" 112 113 dhcp_tag = interface.dhcp_tag 114 if dhcp_tag == "": 115 dhcp_tag = "default" 116 117 if dhcp_tag not in system_definitions: 118 system_definitions[dhcp_tag] = "" 119 system_definitions[dhcp_tag] = system_definitions[dhcp_tag] + systxt 120 121 # We are now done with the looping through each interface of each system. 122 123 metadata = { 124 "insert_cobbler_system_definitions": system_definitions.get("default", ""), 125 "date": time.asctime(time.gmtime()), 126 "cobbler_server": self.settings.server, 127 "next_server_v4": self.settings.next_server_v4, 128 "next_server_v6": self.settings.next_server_v6, 129 } 130 131 # now add in other DHCP expansions that are not tagged with "default" 132 for x in list(system_definitions.keys()): 133 if x == "default": 134 continue 135 metadata["insert_cobbler_system_definitions_%s" % x] = system_definitions[x] 136 137 self.templar.render(template_data, metadata, settings_file) 138 139 def regen_ethers(self): 140 """ 141 This function regenerates the ethers file. To get more information please read ``man ethers``, the format is 142 also in there described. 143 """ 144 # dnsmasq knows how to read this database of MACs -> IPs, so we'll keep it up to date every time we add a 145 # system. 146 fh = open("/etc/ethers", "w+") 147 for system in self.systems: 148 if not system.is_management_supported(cidr_ok=False): 149 continue 150 for interface in system.interfaces.values(): 151 mac = interface.mac_address 152 ip = interface.ip_address 153 if not mac: 154 # can't write this w/o a MAC address 155 continue 156 if ip is not None and ip != "": 157 fh.write(mac.upper() + "\t" + ip + "\n") 158 fh.close() 159 160 def regen_hosts(self): 161 """ 162 This rewrites the hosts file and thus also rewrites the dns config. 163 """ 164 # dnsmasq knows how to read this database for host info (other things may also make use of this later) 165 fh = open("/var/lib/cobbler/cobbler_hosts", "w+") 166 for system in self.systems: 167 if not system.is_management_supported(cidr_ok=False): 168 continue 169 for (_, interface) in system.interfaces.items(): 170 mac = interface.mac_address 171 host = interface.dns_name 172 ip = interface.ip_address 173 ipv6 = interface.ipv6_address 174 if not mac: 175 continue 176 if host is not None and host != "" and ipv6 is not None and ipv6 != "": 177 fh.write(ipv6 + "\t" + host + "\n") 178 elif host is not None and host != "" and ip is not None and ip != "": 179 fh.write(ip + "\t" + host + "\n") 180 fh.close() 181 182 def restart_service(self): 183 """ 184 This restarts the dhcp server and thus applied the newly written config files. 185 """ 186 service_name = "dnsmasq" 187 if self.settings.restart_dhcp: 188 return_code_service_restart = utils.service_restart(service_name) 189 if return_code_service_restart != 0: 190 self.logger.error("%s service failed", service_name) 191 return return_code_service_restart 192 193 194 def get_manager(api): 195 """ 196 Creates a manager object to manage a dnsmasq server. 197 198 :param api: The API to resolve all information with. 199 :return: The object generated from the class. 
200 """ 201 # Singleton used, therefore ignoring 'global' 202 global MANAGER # pylint: disable=global-statement 203 204 if not MANAGER: 205 MANAGER = _DnsmasqManager(api) 206 return MANAGER 207 [end of cobbler/modules/managers/dnsmasq.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cobbler/modules/managers/dnsmasq.py b/cobbler/modules/managers/dnsmasq.py --- a/cobbler/modules/managers/dnsmasq.py +++ b/cobbler/modules/managers/dnsmasq.py @@ -169,14 +169,21 @@ for (_, interface) in system.interfaces.items(): mac = interface.mac_address host = interface.dns_name + cnames = " ".join(interface.cnames) ip = interface.ip_address ipv6 = interface.ipv6_address if not mac: continue if host is not None and host != "" and ipv6 is not None and ipv6 != "": - fh.write(ipv6 + "\t" + host + "\n") + if cnames: + fh.write(ipv6 + "\t" + host + ' ' + cnames + "\n") + else: + fh.write(ipv6 + "\t" + host + "\n") elif host is not None and host != "" and ip is not None and ip != "": - fh.write(ip + "\t" + host + "\n") + if cnames: + fh.write(ip + "\t" + host + ' ' + cnames + "\n") + else: + fh.write(ip + "\t" + host + "\n") fh.close() def restart_service(self):
{"golden_diff": "diff --git a/cobbler/modules/managers/dnsmasq.py b/cobbler/modules/managers/dnsmasq.py\n--- a/cobbler/modules/managers/dnsmasq.py\n+++ b/cobbler/modules/managers/dnsmasq.py\n@@ -169,14 +169,21 @@\n for (_, interface) in system.interfaces.items():\n mac = interface.mac_address\n host = interface.dns_name\n+ cnames = \" \".join(interface.cnames)\n ip = interface.ip_address\n ipv6 = interface.ipv6_address\n if not mac:\n continue\n if host is not None and host != \"\" and ipv6 is not None and ipv6 != \"\":\n- fh.write(ipv6 + \"\\t\" + host + \"\\n\")\n+ if cnames:\n+ fh.write(ipv6 + \"\\t\" + host + ' ' + cnames + \"\\n\")\n+ else:\n+ fh.write(ipv6 + \"\\t\" + host + \"\\n\")\n elif host is not None and host != \"\" and ip is not None and ip != \"\":\n- fh.write(ip + \"\\t\" + host + \"\\n\")\n+ if cnames:\n+ fh.write(ip + \"\\t\" + host + ' ' + cnames + \"\\n\")\n+ else:\n+ fh.write(ip + \"\\t\" + host + \"\\n\")\n fh.close()\n \n def restart_service(self):\n", "issue": "[Backport] Add support for cnames in dnsmasq\n### Original feature issue\r\n\r\n- Issue: #1643\r\n- PR: #3640\r\n\r\n### Target release\r\n\r\n- [x] release33\r\n- [ ] release32\r\n- [ ] release30\r\n\r\n### Reason\r\n\r\nStabilization of Cobbler 3.3.5\r\n\n", "before_files": [{"content": "\"\"\"\nThis is some of the code behind 'cobbler sync'.\n\nCopyright 2006-2009, Red Hat, Inc and Others\nMichael DeHaan <michael.dehaan AT gmail>\nJohn Eckersberg <jeckersb@redhat.com>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\"\"\"\n\nimport time\n\nimport cobbler.utils as utils\nfrom cobbler.manager import ManagerModule\n\nMANAGER = None\n\n\ndef register() -> str:\n \"\"\"\n The mandatory Cobbler modules registration hook.\n\n :return: Always \"manage\".\n \"\"\"\n return \"manage\"\n\n\nclass _DnsmasqManager(ManagerModule):\n \"\"\"\n Handles conversion of internal state to the tftpboot tree layout.\n \"\"\"\n\n @staticmethod\n def what() -> str:\n \"\"\"\n This identifies the module.\n\n :return: Will always return ``dnsmasq``.\n \"\"\"\n return \"dnsmasq\"\n\n def write_configs(self):\n \"\"\"\n DHCP files are written when ``manage_dhcp`` is set in our settings.\n\n :raises OSError\n \"\"\"\n\n settings_file = \"/etc/dnsmasq.conf\"\n template_file = \"/etc/cobbler/dnsmasq.template\"\n\n try:\n f2 = open(template_file, \"r\")\n except Exception:\n raise OSError(\"error writing template to file: %s\" % template_file)\n template_data = f2.read()\n f2.close()\n\n system_definitions = {}\n\n # we used to just loop through each system, but now we must loop\n # through each network interface of each system.\n\n for system in self.systems:\n\n if not system.is_management_supported(cidr_ok=False):\n continue\n\n profile = system.get_conceptual_parent()\n distro = profile.get_conceptual_parent()\n for interface in system.interfaces.values():\n\n mac = interface.mac_address\n ip = interface.ip_address\n host = interface.dns_name\n ipv6 = interface.ipv6_address\n\n if not mac:\n # can't write a DHCP entry for this system\n continue\n\n # In many reallife situations there is a need to control the IP address and hostname for a specific\n # client when only the MAC address is available. In addition to that in some scenarios there is a need\n # to explicitly label a host with the applicable architecture in order to correctly handle situations\n # where we need something other than ``pxelinux.0``. So we always write a dhcp-host entry with as much\n # info as possible to allow maximum control and flexibility within the dnsmasq config.\n\n systxt = \"dhcp-host=net:\" + distro.arch.value.lower() + \",\" + mac\n\n if host != \"\":\n systxt += \",\" + host\n\n if ip != \"\":\n systxt += \",\" + ip\n if ipv6 != \"\":\n systxt += \",[%s]\" % ipv6\n\n systxt += \"\\n\"\n\n dhcp_tag = interface.dhcp_tag\n if dhcp_tag == \"\":\n dhcp_tag = \"default\"\n\n if dhcp_tag not in system_definitions:\n system_definitions[dhcp_tag] = \"\"\n system_definitions[dhcp_tag] = system_definitions[dhcp_tag] + systxt\n\n # We are now done with the looping through each interface of each system.\n\n metadata = {\n \"insert_cobbler_system_definitions\": system_definitions.get(\"default\", \"\"),\n \"date\": time.asctime(time.gmtime()),\n \"cobbler_server\": self.settings.server,\n \"next_server_v4\": self.settings.next_server_v4,\n \"next_server_v6\": self.settings.next_server_v6,\n }\n\n # now add in other DHCP expansions that are not tagged with \"default\"\n for x in list(system_definitions.keys()):\n if x == \"default\":\n continue\n metadata[\"insert_cobbler_system_definitions_%s\" % x] = system_definitions[x]\n\n self.templar.render(template_data, metadata, settings_file)\n\n def regen_ethers(self):\n \"\"\"\n This function regenerates the ethers file. 
To get more information please read ``man ethers``, the format is\n also in there described.\n \"\"\"\n # dnsmasq knows how to read this database of MACs -> IPs, so we'll keep it up to date every time we add a\n # system.\n fh = open(\"/etc/ethers\", \"w+\")\n for system in self.systems:\n if not system.is_management_supported(cidr_ok=False):\n continue\n for interface in system.interfaces.values():\n mac = interface.mac_address\n ip = interface.ip_address\n if not mac:\n # can't write this w/o a MAC address\n continue\n if ip is not None and ip != \"\":\n fh.write(mac.upper() + \"\\t\" + ip + \"\\n\")\n fh.close()\n\n def regen_hosts(self):\n \"\"\"\n This rewrites the hosts file and thus also rewrites the dns config.\n \"\"\"\n # dnsmasq knows how to read this database for host info (other things may also make use of this later)\n fh = open(\"/var/lib/cobbler/cobbler_hosts\", \"w+\")\n for system in self.systems:\n if not system.is_management_supported(cidr_ok=False):\n continue\n for (_, interface) in system.interfaces.items():\n mac = interface.mac_address\n host = interface.dns_name\n ip = interface.ip_address\n ipv6 = interface.ipv6_address\n if not mac:\n continue\n if host is not None and host != \"\" and ipv6 is not None and ipv6 != \"\":\n fh.write(ipv6 + \"\\t\" + host + \"\\n\")\n elif host is not None and host != \"\" and ip is not None and ip != \"\":\n fh.write(ip + \"\\t\" + host + \"\\n\")\n fh.close()\n\n def restart_service(self):\n \"\"\"\n This restarts the dhcp server and thus applied the newly written config files.\n \"\"\"\n service_name = \"dnsmasq\"\n if self.settings.restart_dhcp:\n return_code_service_restart = utils.service_restart(service_name)\n if return_code_service_restart != 0:\n self.logger.error(\"%s service failed\", service_name)\n return return_code_service_restart\n\n\ndef get_manager(api):\n \"\"\"\n Creates a manager object to manage a dnsmasq server.\n\n :param api: The API to resolve all information with.\n :return: The object generated from the class.\n \"\"\"\n # Singleton used, therefore ignoring 'global'\n global MANAGER # pylint: disable=global-statement\n\n if not MANAGER:\n MANAGER = _DnsmasqManager(api)\n return MANAGER\n", "path": "cobbler/modules/managers/dnsmasq.py"}]}
2,760
316
gh_patches_debug_3450
rasdani/github-patches
git_diff
astronomer__astro-sdk-176
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use standard AWS environment variables **Context** At the moment, Astro 0.6.x uses a custom environment variable `AIRFLOW__ASTRO__CONN_AWS_DEFAULT` to define AWS credentials. However, there are standard [AWS environment variables to define credentials](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#using-environment-variables). **Acceptance criteria** * Replace any occurrence of `AIRFLOW__ASTRO__CONN_AWS_DEFAULT` by `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` </issue> <code> [start of src/astro/utils/cloud_storage_creds.py] 1 import json 2 import os 3 from urllib import parse 4 5 from airflow.hooks.base import BaseHook 6 7 from astro.utils.dependencies import ( 8 AwsBaseHook, 9 BotoSession, 10 GCSClient, 11 GCSHook, 12 google_service_account, 13 ) 14 15 16 def parse_s3_env_var(): 17 raw_data = ( 18 os.environ["AIRFLOW__ASTRO__CONN_AWS_DEFAULT"] 19 .replace("%2F", "/") 20 .replace("aws://", "") 21 .replace("@", "") 22 .split(":") 23 ) 24 return [parse.unquote(r) for r in raw_data] 25 26 27 def s3fs_creds(conn_id=None): 28 """Structure s3fs credentials from Airflow connection. 29 s3fs enables pandas to write to s3 30 """ 31 if conn_id: 32 # The following line raises a friendly exception 33 BaseHook.get_connection(conn_id) 34 aws_hook = AwsBaseHook(conn_id, client_type="S3") 35 session = aws_hook.get_session() 36 else: 37 key, secret = parse_s3_env_var() 38 session = BotoSession( 39 aws_access_key_id=key, 40 aws_secret_access_key=secret, 41 ) 42 return dict(client=session.client("s3")) 43 44 45 def gcs_client(conn_id=None): 46 """ 47 get GCS credentials for storage. 48 """ 49 if conn_id: 50 gcs_hook = GCSHook(conn_id) 51 client = gcs_hook.get_conn() 52 else: 53 client = GCSClient() 54 55 return dict(client=client) 56 [end of src/astro/utils/cloud_storage_creds.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/astro/utils/cloud_storage_creds.py b/src/astro/utils/cloud_storage_creds.py --- a/src/astro/utils/cloud_storage_creds.py +++ b/src/astro/utils/cloud_storage_creds.py @@ -14,14 +14,7 @@ def parse_s3_env_var(): - raw_data = ( - os.environ["AIRFLOW__ASTRO__CONN_AWS_DEFAULT"] - .replace("%2F", "/") - .replace("aws://", "") - .replace("@", "") - .split(":") - ) - return [parse.unquote(r) for r in raw_data] + return os.environ["AWS_ACCESS_KEY_ID"], os.environ["AWS_SECRET_ACCESS_KEY"] def s3fs_creds(conn_id=None):
{"golden_diff": "diff --git a/src/astro/utils/cloud_storage_creds.py b/src/astro/utils/cloud_storage_creds.py\n--- a/src/astro/utils/cloud_storage_creds.py\n+++ b/src/astro/utils/cloud_storage_creds.py\n@@ -14,14 +14,7 @@\n \n \n def parse_s3_env_var():\n- raw_data = (\n- os.environ[\"AIRFLOW__ASTRO__CONN_AWS_DEFAULT\"]\n- .replace(\"%2F\", \"/\")\n- .replace(\"aws://\", \"\")\n- .replace(\"@\", \"\")\n- .split(\":\")\n- )\n- return [parse.unquote(r) for r in raw_data]\n+ return os.environ[\"AWS_ACCESS_KEY_ID\"], os.environ[\"AWS_SECRET_ACCESS_KEY\"]\n \n \n def s3fs_creds(conn_id=None):\n", "issue": "Use standard AWS environment variables\n**Context**\r\nAt the moment, Astro 0.6.x uses a custom environment variable `AIRFLOW__ASTRO__CONN_AWS_DEFAULT` to define AWS credentials. However, there are standard [AWS environment variables to define credentials](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#using-environment-variables).\r\n\r\n**Acceptance criteria**\r\n* Replace any occurrence of `AIRFLOW__ASTRO__CONN_AWS_DEFAULT` by `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`\n", "before_files": [{"content": "import json\nimport os\nfrom urllib import parse\n\nfrom airflow.hooks.base import BaseHook\n\nfrom astro.utils.dependencies import (\n AwsBaseHook,\n BotoSession,\n GCSClient,\n GCSHook,\n google_service_account,\n)\n\n\ndef parse_s3_env_var():\n raw_data = (\n os.environ[\"AIRFLOW__ASTRO__CONN_AWS_DEFAULT\"]\n .replace(\"%2F\", \"/\")\n .replace(\"aws://\", \"\")\n .replace(\"@\", \"\")\n .split(\":\")\n )\n return [parse.unquote(r) for r in raw_data]\n\n\ndef s3fs_creds(conn_id=None):\n \"\"\"Structure s3fs credentials from Airflow connection.\n s3fs enables pandas to write to s3\n \"\"\"\n if conn_id:\n # The following line raises a friendly exception\n BaseHook.get_connection(conn_id)\n aws_hook = AwsBaseHook(conn_id, client_type=\"S3\")\n session = aws_hook.get_session()\n else:\n key, secret = parse_s3_env_var()\n session = BotoSession(\n aws_access_key_id=key,\n aws_secret_access_key=secret,\n )\n return dict(client=session.client(\"s3\"))\n\n\ndef gcs_client(conn_id=None):\n \"\"\"\n get GCS credentials for storage.\n \"\"\"\n if conn_id:\n gcs_hook = GCSHook(conn_id)\n client = gcs_hook.get_conn()\n else:\n client = GCSClient()\n\n return dict(client=client)\n", "path": "src/astro/utils/cloud_storage_creds.py"}]}
1,086
171
gh_patches_debug_33725
rasdani/github-patches
git_diff
modoboa__modoboa-1859
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Dashboard - server behind proxy # Impacted versions * Modoboa: 1.14.0 * Webserver: Nginx # Steps to reproduce Modoboa server is behind proxy, so no internet direct access Acces dashboard via admin account # Current behavior 504 Gateway Time-out # Expected behavior no error </issue> <code> [start of modoboa/core/views/dashboard.py] 1 """Core dashboard views.""" 2 3 import feedparser 4 import requests 5 from dateutil import parser 6 from requests.exceptions import RequestException 7 8 from django.contrib.auth import mixins as auth_mixins 9 from django.views import generic 10 11 from .. import signals 12 13 MODOBOA_WEBSITE_URL = "https://modoboa.org/" 14 15 16 class DashboardView(auth_mixins.AccessMixin, generic.TemplateView): 17 """Dashboard view.""" 18 19 template_name = "core/dashboard.html" 20 21 def dispatch(self, request, *args, **kwargs): 22 """Check if user can access dashboard.""" 23 if not request.user.is_authenticated or not request.user.is_admin: 24 return self.handle_no_permission() 25 return super(DashboardView, self).dispatch(request, *args, **kwargs) 26 27 def get_context_data(self, **kwargs): 28 """Add context variables.""" 29 context = super(DashboardView, self).get_context_data(**kwargs) 30 context.update({ 31 "selection": "dashboard", "widgets": {"left": [], "right": []} 32 }) 33 # Fetch latest news 34 if self.request.user.language == "fr": 35 lang = "fr" 36 else: 37 lang = "en" 38 context.update({"selection": "dashboard"}) 39 40 feed_url = "{}{}/weblog/feeds/".format(MODOBOA_WEBSITE_URL, lang) 41 if self.request.user.role != "SuperAdmins": 42 custom_feed_url = ( 43 self.request.localconfig.parameters.get_value("rss_feed_url")) 44 if custom_feed_url: 45 feed_url = custom_feed_url 46 posts = feedparser.parse(feed_url) 47 entries = [] 48 for entry in posts["entries"][:5]: 49 entry["published"] = parser.parse(entry["published"]) 50 entries.append(entry) 51 context["widgets"]["left"].append("core/_latest_news_widget.html") 52 context.update({"news": entries}) 53 54 hide_features_widget = self.request.localconfig.parameters.get_value( 55 "hide_features_widget") 56 if self.request.user.is_superuser or not hide_features_widget: 57 url = "{}{}/api/projects/?featured=true".format( 58 MODOBOA_WEBSITE_URL, lang) 59 features = [] 60 try: 61 response = requests.get(url) 62 except RequestException: 63 pass 64 else: 65 if response.status_code == 200: 66 features = response.json() 67 context["widgets"]["right"].append("core/_current_features.html") 68 context.update({"features": features}) 69 70 # Extra widgets 71 result = signals.extra_admin_dashboard_widgets.send( 72 sender=self.__class__, user=self.request.user) 73 for _receiver, widgets in result: 74 for widget in widgets: 75 context["widgets"][widget["column"]].append( 76 widget["template"]) 77 # FIXME: can raise conflicts... 78 context.update(widget["context"]) 79 80 return context 81 [end of modoboa/core/views/dashboard.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/modoboa/core/views/dashboard.py b/modoboa/core/views/dashboard.py --- a/modoboa/core/views/dashboard.py +++ b/modoboa/core/views/dashboard.py @@ -8,6 +8,8 @@ from django.contrib.auth import mixins as auth_mixins from django.views import generic +from django.conf import settings + from .. import signals MODOBOA_WEBSITE_URL = "https://modoboa.org/" @@ -43,11 +45,12 @@ self.request.localconfig.parameters.get_value("rss_feed_url")) if custom_feed_url: feed_url = custom_feed_url - posts = feedparser.parse(feed_url) entries = [] - for entry in posts["entries"][:5]: - entry["published"] = parser.parse(entry["published"]) - entries.append(entry) + if not settings.DISABLE_DASHBOARD_EXTERNAL_QUERIES: + posts = feedparser.parse(feed_url) + for entry in posts["entries"][:5]: + entry["published"] = parser.parse(entry["published"]) + entries.append(entry) context["widgets"]["left"].append("core/_latest_news_widget.html") context.update({"news": entries}) @@ -57,13 +60,14 @@ url = "{}{}/api/projects/?featured=true".format( MODOBOA_WEBSITE_URL, lang) features = [] - try: - response = requests.get(url) - except RequestException: - pass - else: - if response.status_code == 200: - features = response.json() + if not settings.DISABLE_DASHBOARD_EXTERNAL_QUERIES: + try: + response = requests.get(url) + except RequestException: + pass + else: + if response.status_code == 200: + features = response.json() context["widgets"]["right"].append("core/_current_features.html") context.update({"features": features})
{"golden_diff": "diff --git a/modoboa/core/views/dashboard.py b/modoboa/core/views/dashboard.py\n--- a/modoboa/core/views/dashboard.py\n+++ b/modoboa/core/views/dashboard.py\n@@ -8,6 +8,8 @@\n from django.contrib.auth import mixins as auth_mixins\n from django.views import generic\n \n+from django.conf import settings\n+\n from .. import signals\n \n MODOBOA_WEBSITE_URL = \"https://modoboa.org/\"\n@@ -43,11 +45,12 @@\n self.request.localconfig.parameters.get_value(\"rss_feed_url\"))\n if custom_feed_url:\n feed_url = custom_feed_url\n- posts = feedparser.parse(feed_url)\n entries = []\n- for entry in posts[\"entries\"][:5]:\n- entry[\"published\"] = parser.parse(entry[\"published\"])\n- entries.append(entry)\n+ if not settings.DISABLE_DASHBOARD_EXTERNAL_QUERIES:\n+ posts = feedparser.parse(feed_url)\n+ for entry in posts[\"entries\"][:5]:\n+ entry[\"published\"] = parser.parse(entry[\"published\"])\n+ entries.append(entry)\n context[\"widgets\"][\"left\"].append(\"core/_latest_news_widget.html\")\n context.update({\"news\": entries})\n \n@@ -57,13 +60,14 @@\n url = \"{}{}/api/projects/?featured=true\".format(\n MODOBOA_WEBSITE_URL, lang)\n features = []\n- try:\n- response = requests.get(url)\n- except RequestException:\n- pass\n- else:\n- if response.status_code == 200:\n- features = response.json()\n+ if not settings.DISABLE_DASHBOARD_EXTERNAL_QUERIES:\n+ try:\n+ response = requests.get(url)\n+ except RequestException:\n+ pass\n+ else:\n+ if response.status_code == 200:\n+ features = response.json()\n context[\"widgets\"][\"right\"].append(\"core/_current_features.html\")\n context.update({\"features\": features})\n", "issue": "Dashboard - server behind proxy\n# Impacted versions\r\n\r\n* Modoboa: 1.14.0\r\n* Webserver: Nginx\r\n\r\n# Steps to reproduce\r\nModoboa server is behind proxy, so no internet direct access\r\nAcces dashboard via admin account\r\n\r\n# Current behavior\r\n504 Gateway Time-out\r\n\r\n# Expected behavior\r\nno error\r\n\n", "before_files": [{"content": "\"\"\"Core dashboard views.\"\"\"\n\nimport feedparser\nimport requests\nfrom dateutil import parser\nfrom requests.exceptions import RequestException\n\nfrom django.contrib.auth import mixins as auth_mixins\nfrom django.views import generic\n\nfrom .. 
import signals\n\nMODOBOA_WEBSITE_URL = \"https://modoboa.org/\"\n\n\nclass DashboardView(auth_mixins.AccessMixin, generic.TemplateView):\n \"\"\"Dashboard view.\"\"\"\n\n template_name = \"core/dashboard.html\"\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"Check if user can access dashboard.\"\"\"\n if not request.user.is_authenticated or not request.user.is_admin:\n return self.handle_no_permission()\n return super(DashboardView, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n \"\"\"Add context variables.\"\"\"\n context = super(DashboardView, self).get_context_data(**kwargs)\n context.update({\n \"selection\": \"dashboard\", \"widgets\": {\"left\": [], \"right\": []}\n })\n # Fetch latest news\n if self.request.user.language == \"fr\":\n lang = \"fr\"\n else:\n lang = \"en\"\n context.update({\"selection\": \"dashboard\"})\n\n feed_url = \"{}{}/weblog/feeds/\".format(MODOBOA_WEBSITE_URL, lang)\n if self.request.user.role != \"SuperAdmins\":\n custom_feed_url = (\n self.request.localconfig.parameters.get_value(\"rss_feed_url\"))\n if custom_feed_url:\n feed_url = custom_feed_url\n posts = feedparser.parse(feed_url)\n entries = []\n for entry in posts[\"entries\"][:5]:\n entry[\"published\"] = parser.parse(entry[\"published\"])\n entries.append(entry)\n context[\"widgets\"][\"left\"].append(\"core/_latest_news_widget.html\")\n context.update({\"news\": entries})\n\n hide_features_widget = self.request.localconfig.parameters.get_value(\n \"hide_features_widget\")\n if self.request.user.is_superuser or not hide_features_widget:\n url = \"{}{}/api/projects/?featured=true\".format(\n MODOBOA_WEBSITE_URL, lang)\n features = []\n try:\n response = requests.get(url)\n except RequestException:\n pass\n else:\n if response.status_code == 200:\n features = response.json()\n context[\"widgets\"][\"right\"].append(\"core/_current_features.html\")\n context.update({\"features\": features})\n\n # Extra widgets\n result = signals.extra_admin_dashboard_widgets.send(\n sender=self.__class__, user=self.request.user)\n for _receiver, widgets in result:\n for widget in widgets:\n context[\"widgets\"][widget[\"column\"]].append(\n widget[\"template\"])\n # FIXME: can raise conflicts...\n context.update(widget[\"context\"])\n\n return context\n", "path": "modoboa/core/views/dashboard.py"}]}
1,363
432