Dataset columns and observed value ranges:

Column             Type    Values
problem_id         string  lengths 18 to 22
source             string  1 distinct value
task_type          string  1 distinct value
in_source_id       string  lengths 13 to 58
prompt             string  lengths 1.1k to 10.2k
golden_diff        string  lengths 151 to 4.94k
verification_info  string  lengths 582 to 21k
num_tokens         int64   271 to 2.05k
num_tokens_diff    int64   47 to 1.02k
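Each record below lays out these fields in order, one after another. As a quick orientation, here is a minimal sketch of loading and inspecting such rows with the Hugging Face `datasets` library; the dataset id is taken from the `source` column of the records, and the split name `"train"` is an assumption, not something stated above.

```python
from datasets import load_dataset

# Dataset id comes from the `source` column of the records below.
# The split name "train" is assumed; adjust to whatever splits actually exist.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"])
print(row["num_tokens"], row["num_tokens_diff"])
```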
problem_id: gh_patches_debug_57244
source: rasdani/github-patches
task_type: git_diff
in_source_id: meltano__meltano-6333
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Uvicorn Windows message points to wrong flag ```2022-06-30T19:52:16.704167Z [warning ] Add ff.start_uvicorn: True to your meltano.yml to supress this waring``` Should point to ff.enable_uvicorn [env: MELTANO_FF_ENABLE_UVICORN] current value: False (default) Should be an easy one! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/meltano/api/workers/api_worker.py` Content: ``` 1 """Starts WSGI Webserver that will run the API App for a Meltano Project.""" 2 import logging 3 import platform 4 import threading 5 6 from meltano.core.meltano_invoker import MeltanoInvoker 7 from meltano.core.project import Project 8 from meltano.core.project_settings_service import ProjectSettingsService 9 from meltano.core.settings_service import FeatureFlags 10 from meltano.core.utils.pidfile import PIDFile 11 12 13 class APIWorker(threading.Thread): 14 """The Base APIWorker Class.""" 15 16 def __init__(self, project: Project, reload=False): 17 """Initialize the API Worker class with the project config. 18 19 Args: 20 project: Project class. 21 reload: Boolean. 22 """ 23 super().__init__() 24 25 self.project = project 26 self.reload = reload 27 self.pid_file = PIDFile(self.project.run_dir("gunicorn.pid")) 28 self.settings_service = ProjectSettingsService(self.project.find()) 29 30 def run(self): 31 """Run the initalized API Workers with the App Server requested.""" 32 with self.settings_service.feature_flag( 33 FeatureFlags.ENABLE_UVICORN, raise_error=False 34 ) as allow: 35 36 enable_uvicorn = allow 37 38 # Use Uvicorn when on Windows 39 if platform.system() == "Windows": 40 if enable_uvicorn: 41 logging.debug("ff.enable_uvicorn enabled, starting uvicorn.") 42 else: 43 logging.warning( 44 "Windows OS detected auto setting ff.enable_uvicorn" 45 ) 46 logging.warning( 47 "Add ff.start_uvicorn: True to your meltano.yml to supress this waring" 48 ) 49 enable_uvicorn = True 50 51 # Start uvicorn to serve API and Ui 52 if enable_uvicorn: 53 settings_for_apiworker = self.settings_service 54 55 arg_bind_host = str(settings_for_apiworker.get("ui.bind_host")) 56 arg_bind_port = str(settings_for_apiworker.get("ui.bind_port")) 57 arg_loglevel = str(settings_for_apiworker.get("cli.log_level")) 58 arg_forwarded_allow_ips = str( 59 settings_for_apiworker.get("ui.forwarded_allow_ips") 60 ) 61 62 # If windows and 127.0.0.1 only allowed changing bind host to accomidate 63 if platform.system() == "Windows": 64 if ( 65 arg_forwarded_allow_ips == "127.0.0.1" 66 and arg_bind_host == "0.0.0.0" # noqa: S104 67 ): 68 # If left at 0.0.0.0 the server will respond to any request receieved on any interface 69 arg_bind_host = "127.0.0.1" 70 71 # Setup args for uvicorn using bind info from the project setings service 72 args = [ 73 "--host", 74 arg_bind_host, 75 "--port", 76 arg_bind_port, 77 "--loop", 78 "asyncio", 79 "--interface", 80 "wsgi", 81 "--log-level", 82 arg_loglevel, 83 "--forwarded-allow-ips", 84 arg_forwarded_allow_ips, 85 "--timeout-keep-alive", 86 "600", 87 ] 88 89 # Add reload argument if reload is true 90 if self.reload: 91 92 args += [ 93 "--reload", 94 ] 95 96 # Add the Meltano API app, factory create_app function combo to the args 97 args += [ 98 "--factory", 99 "meltano.api.app:create_app", 100 ] 101 102 # Start uvicorn using the MeltanoInvoker 103 MeltanoInvoker(self.project).invoke(args, command="uvicorn") 
104 105 else: 106 # Use Gunicorn when feature flag start_uvicorn is not set 107 108 args = [ 109 "--config", 110 "python:meltano.api.wsgi", 111 "--pid", 112 str(self.pid_file), 113 ] 114 115 if self.reload: 116 args += ["--reload"] 117 118 args += ["meltano.api.app:create_app()"] 119 120 MeltanoInvoker(self.project).invoke(args, command="gunicorn") 121 122 def pid_path(self): 123 """Give the path name of the projects gunicorn.pid file location. 124 125 Returns: 126 Path object that gives the direct locationo of the gunicorn.pid file. 127 """ 128 return self.project.run_dir("gunicorn.pid") 129 130 def stop(self): 131 """Terminnate active gunicorn workers that have placed a PID in the project's gunicorn.pid file.""" 132 self.pid_file.process.terminate() 133 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/meltano/api/workers/api_worker.py b/src/meltano/api/workers/api_worker.py --- a/src/meltano/api/workers/api_worker.py +++ b/src/meltano/api/workers/api_worker.py @@ -44,7 +44,7 @@ "Windows OS detected auto setting ff.enable_uvicorn" ) logging.warning( - "Add ff.start_uvicorn: True to your meltano.yml to supress this waring" + "Add ff.enable_uvicorn: True to your meltano.yml to supress this waring" ) enable_uvicorn = True
{"golden_diff": "diff --git a/src/meltano/api/workers/api_worker.py b/src/meltano/api/workers/api_worker.py\n--- a/src/meltano/api/workers/api_worker.py\n+++ b/src/meltano/api/workers/api_worker.py\n@@ -44,7 +44,7 @@\n \"Windows OS detected auto setting ff.enable_uvicorn\"\n )\n logging.warning(\n- \"Add ff.start_uvicorn: True to your meltano.yml to supress this waring\"\n+ \"Add ff.enable_uvicorn: True to your meltano.yml to supress this waring\"\n )\n enable_uvicorn = True\n", "issue": "Uvicorn Windows message points to wrong flag\n```2022-06-30T19:52:16.704167Z [warning ] Add ff.start_uvicorn: True to your meltano.yml to supress this waring```\r\n\r\nShould point to\r\n\r\nff.enable_uvicorn [env: MELTANO_FF_ENABLE_UVICORN] current value: False (default)\r\n\r\nShould be an easy one!\n", "before_files": [{"content": "\"\"\"Starts WSGI Webserver that will run the API App for a Meltano Project.\"\"\"\nimport logging\nimport platform\nimport threading\n\nfrom meltano.core.meltano_invoker import MeltanoInvoker\nfrom meltano.core.project import Project\nfrom meltano.core.project_settings_service import ProjectSettingsService\nfrom meltano.core.settings_service import FeatureFlags\nfrom meltano.core.utils.pidfile import PIDFile\n\n\nclass APIWorker(threading.Thread):\n \"\"\"The Base APIWorker Class.\"\"\"\n\n def __init__(self, project: Project, reload=False):\n \"\"\"Initialize the API Worker class with the project config.\n\n Args:\n project: Project class.\n reload: Boolean.\n \"\"\"\n super().__init__()\n\n self.project = project\n self.reload = reload\n self.pid_file = PIDFile(self.project.run_dir(\"gunicorn.pid\"))\n self.settings_service = ProjectSettingsService(self.project.find())\n\n def run(self):\n \"\"\"Run the initalized API Workers with the App Server requested.\"\"\"\n with self.settings_service.feature_flag(\n FeatureFlags.ENABLE_UVICORN, raise_error=False\n ) as allow:\n\n enable_uvicorn = allow\n\n # Use Uvicorn when on Windows\n if platform.system() == \"Windows\":\n if enable_uvicorn:\n logging.debug(\"ff.enable_uvicorn enabled, starting uvicorn.\")\n else:\n logging.warning(\n \"Windows OS detected auto setting ff.enable_uvicorn\"\n )\n logging.warning(\n \"Add ff.start_uvicorn: True to your meltano.yml to supress this waring\"\n )\n enable_uvicorn = True\n\n # Start uvicorn to serve API and Ui\n if enable_uvicorn:\n settings_for_apiworker = self.settings_service\n\n arg_bind_host = str(settings_for_apiworker.get(\"ui.bind_host\"))\n arg_bind_port = str(settings_for_apiworker.get(\"ui.bind_port\"))\n arg_loglevel = str(settings_for_apiworker.get(\"cli.log_level\"))\n arg_forwarded_allow_ips = str(\n settings_for_apiworker.get(\"ui.forwarded_allow_ips\")\n )\n\n # If windows and 127.0.0.1 only allowed changing bind host to accomidate\n if platform.system() == \"Windows\":\n if (\n arg_forwarded_allow_ips == \"127.0.0.1\"\n and arg_bind_host == \"0.0.0.0\" # noqa: S104\n ):\n # If left at 0.0.0.0 the server will respond to any request receieved on any interface\n arg_bind_host = \"127.0.0.1\"\n\n # Setup args for uvicorn using bind info from the project setings service\n args = [\n \"--host\",\n arg_bind_host,\n \"--port\",\n arg_bind_port,\n \"--loop\",\n \"asyncio\",\n \"--interface\",\n \"wsgi\",\n \"--log-level\",\n arg_loglevel,\n \"--forwarded-allow-ips\",\n arg_forwarded_allow_ips,\n \"--timeout-keep-alive\",\n \"600\",\n ]\n\n # Add reload argument if reload is true\n if self.reload:\n\n args += [\n \"--reload\",\n ]\n\n # Add the Meltano API app, factory 
create_app function combo to the args\n args += [\n \"--factory\",\n \"meltano.api.app:create_app\",\n ]\n\n # Start uvicorn using the MeltanoInvoker\n MeltanoInvoker(self.project).invoke(args, command=\"uvicorn\")\n\n else:\n # Use Gunicorn when feature flag start_uvicorn is not set\n\n args = [\n \"--config\",\n \"python:meltano.api.wsgi\",\n \"--pid\",\n str(self.pid_file),\n ]\n\n if self.reload:\n args += [\"--reload\"]\n\n args += [\"meltano.api.app:create_app()\"]\n\n MeltanoInvoker(self.project).invoke(args, command=\"gunicorn\")\n\n def pid_path(self):\n \"\"\"Give the path name of the projects gunicorn.pid file location.\n\n Returns:\n Path object that gives the direct locationo of the gunicorn.pid file.\n \"\"\"\n return self.project.run_dir(\"gunicorn.pid\")\n\n def stop(self):\n \"\"\"Terminnate active gunicorn workers that have placed a PID in the project's gunicorn.pid file.\"\"\"\n self.pid_file.process.terminate()\n", "path": "src/meltano/api/workers/api_worker.py"}], "after_files": [{"content": "\"\"\"Starts WSGI Webserver that will run the API App for a Meltano Project.\"\"\"\nimport logging\nimport platform\nimport threading\n\nfrom meltano.core.meltano_invoker import MeltanoInvoker\nfrom meltano.core.project import Project\nfrom meltano.core.project_settings_service import ProjectSettingsService\nfrom meltano.core.settings_service import FeatureFlags\nfrom meltano.core.utils.pidfile import PIDFile\n\n\nclass APIWorker(threading.Thread):\n \"\"\"The Base APIWorker Class.\"\"\"\n\n def __init__(self, project: Project, reload=False):\n \"\"\"Initialize the API Worker class with the project config.\n\n Args:\n project: Project class.\n reload: Boolean.\n \"\"\"\n super().__init__()\n\n self.project = project\n self.reload = reload\n self.pid_file = PIDFile(self.project.run_dir(\"gunicorn.pid\"))\n self.settings_service = ProjectSettingsService(self.project.find())\n\n def run(self):\n \"\"\"Run the initalized API Workers with the App Server requested.\"\"\"\n with self.settings_service.feature_flag(\n FeatureFlags.ENABLE_UVICORN, raise_error=False\n ) as allow:\n\n enable_uvicorn = allow\n\n # Use Uvicorn when on Windows\n if platform.system() == \"Windows\":\n if enable_uvicorn:\n logging.debug(\"ff.enable_uvicorn enabled, starting uvicorn.\")\n else:\n logging.warning(\n \"Windows OS detected auto setting ff.enable_uvicorn\"\n )\n logging.warning(\n \"Add ff.enable_uvicorn: True to your meltano.yml to supress this waring\"\n )\n enable_uvicorn = True\n\n # Start uvicorn to serve API and Ui\n if enable_uvicorn:\n settings_for_apiworker = self.settings_service\n\n arg_bind_host = str(settings_for_apiworker.get(\"ui.bind_host\"))\n arg_bind_port = str(settings_for_apiworker.get(\"ui.bind_port\"))\n arg_loglevel = str(settings_for_apiworker.get(\"cli.log_level\"))\n arg_forwarded_allow_ips = str(\n settings_for_apiworker.get(\"ui.forwarded_allow_ips\")\n )\n\n # If windows and 127.0.0.1 only allowed changing bind host to accomidate\n if platform.system() == \"Windows\":\n if (\n arg_forwarded_allow_ips == \"127.0.0.1\"\n and arg_bind_host == \"0.0.0.0\" # noqa: S104\n ):\n # If left at 0.0.0.0 the server will respond to any request receieved on any interface\n arg_bind_host = \"127.0.0.1\"\n\n # Setup args for uvicorn using bind info from the project setings service\n args = [\n \"--host\",\n arg_bind_host,\n \"--port\",\n arg_bind_port,\n \"--loop\",\n \"asyncio\",\n \"--interface\",\n \"wsgi\",\n \"--log-level\",\n arg_loglevel,\n \"--forwarded-allow-ips\",\n 
arg_forwarded_allow_ips,\n \"--timeout-keep-alive\",\n \"600\",\n ]\n\n # Add reload argument if reload is true\n if self.reload:\n\n args += [\n \"--reload\",\n ]\n\n # Add the Meltano API app, factory create_app function combo to the args\n args += [\n \"--factory\",\n \"meltano.api.app:create_app\",\n ]\n\n # Start uvicorn using the MeltanoInvoker\n MeltanoInvoker(self.project).invoke(args, command=\"uvicorn\")\n\n else:\n # Use Gunicorn when feature flag start_uvicorn is not set\n\n args = [\n \"--config\",\n \"python:meltano.api.wsgi\",\n \"--pid\",\n str(self.pid_file),\n ]\n\n if self.reload:\n args += [\"--reload\"]\n\n args += [\"meltano.api.app:create_app()\"]\n\n MeltanoInvoker(self.project).invoke(args, command=\"gunicorn\")\n\n def pid_path(self):\n \"\"\"Give the path name of the projects gunicorn.pid file location.\n\n Returns:\n Path object that gives the direct locationo of the gunicorn.pid file.\n \"\"\"\n return self.project.run_dir(\"gunicorn.pid\")\n\n def stop(self):\n \"\"\"Terminnate active gunicorn workers that have placed a PID in the project's gunicorn.pid file.\"\"\"\n self.pid_file.process.terminate()\n", "path": "src/meltano/api/workers/api_worker.py"}]}
num_tokens: 1,612
num_tokens_diff: 139
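The `verification_info` cell above is itself a JSON string that bundles the issue text with before/after file snapshots. A hedged sketch of unpacking one such cell, with key names read directly off the record above:

```python
import json
from datasets import load_dataset

# Same assumed load as in the earlier sketch.
row = load_dataset("rasdani/github-patches", split="train")[0]
info = json.loads(row["verification_info"])

# Keys visible in the record: golden_diff, issue, before_files, after_files.
print(info["issue"].splitlines()[0])   # first line of the embedded issue text
for snapshot in info["before_files"]:
    print(snapshot["path"])            # e.g. src/meltano/api/workers/api_worker.py
# The embedded golden_diff appears to duplicate the top-level golden_diff column.
```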
problem_id: gh_patches_debug_6871
source: rasdani/github-patches
task_type: git_diff
in_source_id: sql-machine-learning__elasticdl-1495
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- The worker should wait all channels are ready. If the worker has started to train and the grpc channels to connect PS are not ready, a connection error will be raised. ``` grpc._channel._Rendezvous: <_Rendezvous of RPC that terminated with: status = StatusCode.UNAVAILABLE details = "failed to connect to all addresses" debug_error_string = "{"created":"@1574320322.398282300","description":"Failed to pick subchannel","file":"src/core/ext/filters/client_channel/client_channel.cc","file_line":3876,"referenced_errors":[{"created":"@1574320322.398280426","description":"failed to connect to all addresses","file":"src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc","file_line":395,"grpc_status":14}]}" ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `elasticdl/python/worker/main.py` Content: ``` 1 import time 2 3 import grpc 4 from kubernetes import client, config 5 6 from elasticdl.python.common import log_utils 7 from elasticdl.python.common.args import parse_worker_args 8 from elasticdl.python.common.constants import GRPC 9 from elasticdl.python.common.grpc_utils import build_channel 10 from elasticdl.python.worker.worker import Worker 11 12 13 def main(): 14 args = parse_worker_args() 15 logger = log_utils.get_logger(__name__) 16 logger.info("Starting worker %d", args.worker_id) 17 if args.master_addr is None: 18 raise ValueError("master_addr is missing for worker") 19 20 master_channel = build_channel(args.master_addr) 21 22 ps_channels = [] 23 if args.ps_addrs: 24 # TODO: use ps_addrs from master directly after ps service is working. 25 # Get ps pod ip for ps grpc connection for now. 26 ps_addrs = args.ps_addrs.split(",") 27 28 config.load_incluster_config() 29 api = client.CoreV1Api() 30 31 for addr in ps_addrs: 32 # addr is in the form as "ps-pod-name.namespace.svc:port" 33 addr_splitted = addr.split(".") 34 while True: 35 pod = api.read_namespaced_pod( 36 namespace=addr_splitted[1], name=addr_splitted[0] 37 ) 38 if pod.status.pod_ip: 39 break 40 # If ps pod is not ready yet, sleep 2 seconds and try again. 41 time.sleep(2) 42 addr = pod.status.pod_ip + ":" + addr.split(":")[-1] 43 channel = grpc.insecure_channel( 44 addr, 45 options=[ 46 ( 47 "grpc.max_send_message_length", 48 GRPC.MAX_SEND_MESSAGE_LENGTH, 49 ), 50 ( 51 "grpc.max_receive_message_length", 52 GRPC.MAX_RECEIVE_MESSAGE_LENGTH, 53 ), 54 ], 55 ) 56 ps_channels.append(channel) 57 58 worker = Worker(args, channel=master_channel, ps_channels=ps_channels) 59 worker.run() 60 61 62 if __name__ == "__main__": 63 main() 64 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/elasticdl/python/worker/main.py b/elasticdl/python/worker/main.py --- a/elasticdl/python/worker/main.py +++ b/elasticdl/python/worker/main.py @@ -53,6 +53,13 @@ ), ], ) + + # Wait the channel is ready by a Future object. + grpc.channel_ready_future(channel).result() + logger.info( + "grpc channel %s to connect pod %s is ready" + % (addr, pod.metadata.name) + ) ps_channels.append(channel) worker = Worker(args, channel=master_channel, ps_channels=ps_channels)
{"golden_diff": "diff --git a/elasticdl/python/worker/main.py b/elasticdl/python/worker/main.py\n--- a/elasticdl/python/worker/main.py\n+++ b/elasticdl/python/worker/main.py\n@@ -53,6 +53,13 @@\n ),\n ],\n )\n+\n+ # Wait the channel is ready by a Future object.\n+ grpc.channel_ready_future(channel).result()\n+ logger.info(\n+ \"grpc channel %s to connect pod %s is ready\"\n+ % (addr, pod.metadata.name)\n+ )\n ps_channels.append(channel)\n \n worker = Worker(args, channel=master_channel, ps_channels=ps_channels)\n", "issue": "The worker should wait all channels are ready.\nIf the worker has started to train and the grpc channels to connect PS are not ready, a connection error will be raised.\r\n\r\n```\r\ngrpc._channel._Rendezvous: <_Rendezvous of RPC that terminated with:\r\n\tstatus = StatusCode.UNAVAILABLE\r\n\tdetails = \"failed to connect to all addresses\"\r\n\tdebug_error_string = \"{\"created\":\"@1574320322.398282300\",\"description\":\"Failed to pick subchannel\",\"file\":\"src/core/ext/filters/client_channel/client_channel.cc\",\"file_line\":3876,\"referenced_errors\":[{\"created\":\"@1574320322.398280426\",\"description\":\"failed to connect to all addresses\",\"file\":\"src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc\",\"file_line\":395,\"grpc_status\":14}]}\"\r\n```\n", "before_files": [{"content": "import time\n\nimport grpc\nfrom kubernetes import client, config\n\nfrom elasticdl.python.common import log_utils\nfrom elasticdl.python.common.args import parse_worker_args\nfrom elasticdl.python.common.constants import GRPC\nfrom elasticdl.python.common.grpc_utils import build_channel\nfrom elasticdl.python.worker.worker import Worker\n\n\ndef main():\n args = parse_worker_args()\n logger = log_utils.get_logger(__name__)\n logger.info(\"Starting worker %d\", args.worker_id)\n if args.master_addr is None:\n raise ValueError(\"master_addr is missing for worker\")\n\n master_channel = build_channel(args.master_addr)\n\n ps_channels = []\n if args.ps_addrs:\n # TODO: use ps_addrs from master directly after ps service is working.\n # Get ps pod ip for ps grpc connection for now.\n ps_addrs = args.ps_addrs.split(\",\")\n\n config.load_incluster_config()\n api = client.CoreV1Api()\n\n for addr in ps_addrs:\n # addr is in the form as \"ps-pod-name.namespace.svc:port\"\n addr_splitted = addr.split(\".\")\n while True:\n pod = api.read_namespaced_pod(\n namespace=addr_splitted[1], name=addr_splitted[0]\n )\n if pod.status.pod_ip:\n break\n # If ps pod is not ready yet, sleep 2 seconds and try again.\n time.sleep(2)\n addr = pod.status.pod_ip + \":\" + addr.split(\":\")[-1]\n channel = grpc.insecure_channel(\n addr,\n options=[\n (\n \"grpc.max_send_message_length\",\n GRPC.MAX_SEND_MESSAGE_LENGTH,\n ),\n (\n \"grpc.max_receive_message_length\",\n GRPC.MAX_RECEIVE_MESSAGE_LENGTH,\n ),\n ],\n )\n ps_channels.append(channel)\n\n worker = Worker(args, channel=master_channel, ps_channels=ps_channels)\n worker.run()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/python/worker/main.py"}], "after_files": [{"content": "import time\n\nimport grpc\nfrom kubernetes import client, config\n\nfrom elasticdl.python.common import log_utils\nfrom elasticdl.python.common.args import parse_worker_args\nfrom elasticdl.python.common.constants import GRPC\nfrom elasticdl.python.common.grpc_utils import build_channel\nfrom elasticdl.python.worker.worker import Worker\n\n\ndef main():\n args = parse_worker_args()\n logger = log_utils.get_logger(__name__)\n 
logger.info(\"Starting worker %d\", args.worker_id)\n if args.master_addr is None:\n raise ValueError(\"master_addr is missing for worker\")\n\n master_channel = build_channel(args.master_addr)\n\n ps_channels = []\n if args.ps_addrs:\n # TODO: use ps_addrs from master directly after ps service is working.\n # Get ps pod ip for ps grpc connection for now.\n ps_addrs = args.ps_addrs.split(\",\")\n\n config.load_incluster_config()\n api = client.CoreV1Api()\n\n for addr in ps_addrs:\n # addr is in the form as \"ps-pod-name.namespace.svc:port\"\n addr_splitted = addr.split(\".\")\n while True:\n pod = api.read_namespaced_pod(\n namespace=addr_splitted[1], name=addr_splitted[0]\n )\n if pod.status.pod_ip:\n break\n # If ps pod is not ready yet, sleep 2 seconds and try again.\n time.sleep(2)\n addr = pod.status.pod_ip + \":\" + addr.split(\":\")[-1]\n channel = grpc.insecure_channel(\n addr,\n options=[\n (\n \"grpc.max_send_message_length\",\n GRPC.MAX_SEND_MESSAGE_LENGTH,\n ),\n (\n \"grpc.max_receive_message_length\",\n GRPC.MAX_RECEIVE_MESSAGE_LENGTH,\n ),\n ],\n )\n\n # Wait the channel is ready by a Future object.\n grpc.channel_ready_future(channel).result()\n logger.info(\n \"grpc channel %s to connect pod %s is ready\"\n % (addr, pod.metadata.name)\n )\n ps_channels.append(channel)\n\n worker = Worker(args, channel=master_channel, ps_channels=ps_channels)\n worker.run()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/python/worker/main.py"}]}
num_tokens: 1,012
num_tokens_diff: 144
problem_id: gh_patches_debug_14345
source: rasdani/github-patches
task_type: git_diff
in_source_id: buildbot__buildbot-5765
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix reference to tuplematch in base reporters The tuplematch object was not referenced in the ReporterBase class when referenced from the GitHubStatusPush class. It seems this is not an issue when it is executed from the test but only occures when called from GitHubStatusPush. I don't know how I can test this. Fixes #5765 ## Contributor Checklist: * [ ] I have updated the unit tests * [x] I have created a file in the `master/buildbot/newsfragments` directory (and read the `README.txt` in that directory) * [x] I have updated the appropriate documentation --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `master/buildbot/reporters/base.py` Content: ``` 1 # This file is part of Buildbot. Buildbot is free software: you can 2 # redistribute it and/or modify it under the terms of the GNU General Public 3 # License as published by the Free Software Foundation, version 2. 4 # 5 # This program is distributed in the hope that it will be useful, but WITHOUT 6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS 7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more 8 # details. 9 # 10 # You should have received a copy of the GNU General Public License along with 11 # this program; if not, write to the Free Software Foundation, Inc., 51 12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 13 # 14 # Copyright Buildbot Team Members 15 16 import abc 17 18 from twisted.internet import defer 19 from twisted.python import log 20 21 from buildbot import config 22 from buildbot import util 23 from buildbot.reporters import utils 24 from buildbot.util import service 25 26 ENCODING = 'utf-8' 27 28 29 class ReporterBase(service.BuildbotService): 30 name = None 31 __meta__ = abc.ABCMeta 32 33 compare_attrs = ['generators'] 34 35 def __init__(self, *args, **kwargs): 36 super().__init__(*args, **kwargs) 37 self.generators = None 38 self._event_consumers = [] 39 40 def checkConfig(self, generators): 41 if not isinstance(generators, list): 42 config.error('{}: generators argument must be a list') 43 44 for g in generators: 45 g.check() 46 47 if self.name is None: 48 self.name = self.__class__.__name__ 49 for g in generators: 50 self.name += "_" + g.generate_name() 51 52 @defer.inlineCallbacks 53 def reconfigService(self, generators): 54 55 for consumer in self._event_consumers: 56 yield consumer.stopConsuming() 57 self._event_consumers = [] 58 59 self.generators = generators 60 61 wanted_event_keys = set() 62 for g in self.generators: 63 wanted_event_keys.update(g.wanted_event_keys) 64 65 for key in sorted(list(wanted_event_keys)): 66 consumer = yield self.master.mq.startConsuming(self._got_event, key) 67 self._event_consumers.append(consumer) 68 69 @defer.inlineCallbacks 70 def stopService(self): 71 for consumer in self._event_consumers: 72 yield consumer.stopConsuming() 73 self._event_consumers = [] 74 yield super().stopService() 75 76 def _does_generator_want_key(self, generator, key): 77 for filter in generator.wanted_event_keys: 78 if util.tuplematch.matchTuple(key, filter): 79 return True 80 return False 81 82 @defer.inlineCallbacks 83 def _got_event(self, key, msg): 84 try: 85 reports = [] 86 for g in self.generators: 87 if self._does_generator_want_key(g, key): 88 report = yield g.generate(self.master, self, key, msg) 89 if report is not None: 90 
reports.append(report) 91 92 if reports: 93 yield self.sendMessage(reports) 94 except Exception as e: 95 log.err(e, 'Got exception when handling reporter events') 96 97 def getResponsibleUsersForBuild(self, master, buildid): 98 # Use library method but subclassers may want to override that 99 return utils.getResponsibleUsersForBuild(master, buildid) 100 101 @abc.abstractmethod 102 def sendMessage(self, reports): 103 pass 104 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/master/buildbot/reporters/base.py b/master/buildbot/reporters/base.py --- a/master/buildbot/reporters/base.py +++ b/master/buildbot/reporters/base.py @@ -19,9 +19,9 @@ from twisted.python import log from buildbot import config -from buildbot import util from buildbot.reporters import utils from buildbot.util import service +from buildbot.util import tuplematch ENCODING = 'utf-8' @@ -75,7 +75,7 @@ def _does_generator_want_key(self, generator, key): for filter in generator.wanted_event_keys: - if util.tuplematch.matchTuple(key, filter): + if tuplematch.matchTuple(key, filter): return True return False
{"golden_diff": "diff --git a/master/buildbot/reporters/base.py b/master/buildbot/reporters/base.py\n--- a/master/buildbot/reporters/base.py\n+++ b/master/buildbot/reporters/base.py\n@@ -19,9 +19,9 @@\n from twisted.python import log\n \n from buildbot import config\n-from buildbot import util\n from buildbot.reporters import utils\n from buildbot.util import service\n+from buildbot.util import tuplematch\n \n ENCODING = 'utf-8'\n \n@@ -75,7 +75,7 @@\n \n def _does_generator_want_key(self, generator, key):\n for filter in generator.wanted_event_keys:\n- if util.tuplematch.matchTuple(key, filter):\n+ if tuplematch.matchTuple(key, filter):\n return True\n return False\n", "issue": "Fix reference to tuplematch in base reporters\nThe tuplematch object was not referenced in the ReporterBase class when referenced from the GitHubStatusPush class.\r\n\r\nIt seems this is not an issue when it is executed from the test but only occures when called from GitHubStatusPush. I don't know how I can test this.\r\n\r\nFixes #5765 \r\n\r\n\r\n## Contributor Checklist:\r\n\r\n* [ ] I have updated the unit tests\r\n* [x] I have created a file in the `master/buildbot/newsfragments` directory (and read the `README.txt` in that directory)\r\n* [x] I have updated the appropriate documentation\r\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nimport abc\n\nfrom twisted.internet import defer\nfrom twisted.python import log\n\nfrom buildbot import config\nfrom buildbot import util\nfrom buildbot.reporters import utils\nfrom buildbot.util import service\n\nENCODING = 'utf-8'\n\n\nclass ReporterBase(service.BuildbotService):\n name = None\n __meta__ = abc.ABCMeta\n\n compare_attrs = ['generators']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.generators = None\n self._event_consumers = []\n\n def checkConfig(self, generators):\n if not isinstance(generators, list):\n config.error('{}: generators argument must be a list')\n\n for g in generators:\n g.check()\n\n if self.name is None:\n self.name = self.__class__.__name__\n for g in generators:\n self.name += \"_\" + g.generate_name()\n\n @defer.inlineCallbacks\n def reconfigService(self, generators):\n\n for consumer in self._event_consumers:\n yield consumer.stopConsuming()\n self._event_consumers = []\n\n self.generators = generators\n\n wanted_event_keys = set()\n for g in self.generators:\n wanted_event_keys.update(g.wanted_event_keys)\n\n for key in sorted(list(wanted_event_keys)):\n consumer = yield self.master.mq.startConsuming(self._got_event, key)\n self._event_consumers.append(consumer)\n\n @defer.inlineCallbacks\n def stopService(self):\n for consumer in self._event_consumers:\n yield consumer.stopConsuming()\n self._event_consumers = []\n yield super().stopService()\n\n def _does_generator_want_key(self, generator, key):\n for 
filter in generator.wanted_event_keys:\n if util.tuplematch.matchTuple(key, filter):\n return True\n return False\n\n @defer.inlineCallbacks\n def _got_event(self, key, msg):\n try:\n reports = []\n for g in self.generators:\n if self._does_generator_want_key(g, key):\n report = yield g.generate(self.master, self, key, msg)\n if report is not None:\n reports.append(report)\n\n if reports:\n yield self.sendMessage(reports)\n except Exception as e:\n log.err(e, 'Got exception when handling reporter events')\n\n def getResponsibleUsersForBuild(self, master, buildid):\n # Use library method but subclassers may want to override that\n return utils.getResponsibleUsersForBuild(master, buildid)\n\n @abc.abstractmethod\n def sendMessage(self, reports):\n pass\n", "path": "master/buildbot/reporters/base.py"}], "after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nimport abc\n\nfrom twisted.internet import defer\nfrom twisted.python import log\n\nfrom buildbot import config\nfrom buildbot.reporters import utils\nfrom buildbot.util import service\nfrom buildbot.util import tuplematch\n\nENCODING = 'utf-8'\n\n\nclass ReporterBase(service.BuildbotService):\n name = None\n __meta__ = abc.ABCMeta\n\n compare_attrs = ['generators']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.generators = None\n self._event_consumers = []\n\n def checkConfig(self, generators):\n if not isinstance(generators, list):\n config.error('{}: generators argument must be a list')\n\n for g in generators:\n g.check()\n\n if self.name is None:\n self.name = self.__class__.__name__\n for g in generators:\n self.name += \"_\" + g.generate_name()\n\n @defer.inlineCallbacks\n def reconfigService(self, generators):\n\n for consumer in self._event_consumers:\n yield consumer.stopConsuming()\n self._event_consumers = []\n\n self.generators = generators\n\n wanted_event_keys = set()\n for g in self.generators:\n wanted_event_keys.update(g.wanted_event_keys)\n\n for key in sorted(list(wanted_event_keys)):\n consumer = yield self.master.mq.startConsuming(self._got_event, key)\n self._event_consumers.append(consumer)\n\n @defer.inlineCallbacks\n def stopService(self):\n for consumer in self._event_consumers:\n yield consumer.stopConsuming()\n self._event_consumers = []\n yield super().stopService()\n\n def _does_generator_want_key(self, generator, key):\n for filter in generator.wanted_event_keys:\n if tuplematch.matchTuple(key, filter):\n return True\n return False\n\n @defer.inlineCallbacks\n def _got_event(self, key, msg):\n try:\n reports = []\n for g in self.generators:\n if self._does_generator_want_key(g, key):\n report = yield g.generate(self.master, self, key, msg)\n if report is not None:\n reports.append(report)\n\n if reports:\n yield self.sendMessage(reports)\n except Exception as e:\n log.err(e, 'Got exception 
when handling reporter events')\n\n def getResponsibleUsersForBuild(self, master, buildid):\n # Use library method but subclassers may want to override that\n return utils.getResponsibleUsersForBuild(master, buildid)\n\n @abc.abstractmethod\n def sendMessage(self, reports):\n pass\n", "path": "master/buildbot/reporters/base.py"}]}
num_tokens: 1,344
num_tokens_diff: 176
problem_id: gh_patches_debug_8036
source: rasdani/github-patches
task_type: git_diff
in_source_id: saleor__saleor-2345
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Featured products section should not be shown if there is none If there is no featured products, the home page should not show the empty section. ### Screenshots ![screenshot](https://i.imgur.com/KFPEPnz.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `saleor/core/views.py` Content: ``` 1 import json 2 3 from django.contrib import messages 4 from django.template.response import TemplateResponse 5 from django.utils.translation import pgettext_lazy 6 from impersonate.views import impersonate as orig_impersonate 7 8 from ..account.models import User 9 from ..dashboard.views import staff_member_required 10 from ..product.utils import products_for_homepage 11 from ..product.utils.availability import products_with_availability 12 from ..seo.schema.webpage import get_webpage_schema 13 14 15 def home(request): 16 products = products_for_homepage()[:8] 17 products = products_with_availability( 18 products, discounts=request.discounts, taxes=request.taxes, 19 local_currency=request.currency) 20 webpage_schema = get_webpage_schema(request) 21 return TemplateResponse( 22 request, 'home.html', { 23 'parent': None, 24 'products': products, 25 'webpage_schema': json.dumps(webpage_schema)}) 26 27 28 @staff_member_required 29 def styleguide(request): 30 return TemplateResponse(request, 'styleguide.html') 31 32 33 def impersonate(request, uid): 34 response = orig_impersonate(request, uid) 35 if request.session.modified: 36 msg = pgettext_lazy( 37 'Impersonation message', 38 'You are now logged as {}'.format(User.objects.get(pk=uid))) 39 messages.success(request, msg) 40 return response 41 42 43 def handle_404(request, exception=None): 44 return TemplateResponse(request, '404.html', status=404) 45 46 47 def manifest(request): 48 return TemplateResponse( 49 request, 'manifest.json', content_type='application/json') 50 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/saleor/core/views.py b/saleor/core/views.py --- a/saleor/core/views.py +++ b/saleor/core/views.py @@ -14,9 +14,9 @@ def home(request): products = products_for_homepage()[:8] - products = products_with_availability( + products = list(products_with_availability( products, discounts=request.discounts, taxes=request.taxes, - local_currency=request.currency) + local_currency=request.currency)) webpage_schema = get_webpage_schema(request) return TemplateResponse( request, 'home.html', {
{"golden_diff": "diff --git a/saleor/core/views.py b/saleor/core/views.py\n--- a/saleor/core/views.py\n+++ b/saleor/core/views.py\n@@ -14,9 +14,9 @@\n \n def home(request):\n products = products_for_homepage()[:8]\n- products = products_with_availability(\n+ products = list(products_with_availability(\n products, discounts=request.discounts, taxes=request.taxes,\n- local_currency=request.currency)\n+ local_currency=request.currency))\n webpage_schema = get_webpage_schema(request)\n return TemplateResponse(\n request, 'home.html', {\n", "issue": "Featured products section should not be shown if there is none\nIf there is no featured products, the home page should not show the empty section.\r\n\r\n### Screenshots\r\n![screenshot](https://i.imgur.com/KFPEPnz.png)\r\n\n", "before_files": [{"content": "import json\n\nfrom django.contrib import messages\nfrom django.template.response import TemplateResponse\nfrom django.utils.translation import pgettext_lazy\nfrom impersonate.views import impersonate as orig_impersonate\n\nfrom ..account.models import User\nfrom ..dashboard.views import staff_member_required\nfrom ..product.utils import products_for_homepage\nfrom ..product.utils.availability import products_with_availability\nfrom ..seo.schema.webpage import get_webpage_schema\n\n\ndef home(request):\n products = products_for_homepage()[:8]\n products = products_with_availability(\n products, discounts=request.discounts, taxes=request.taxes,\n local_currency=request.currency)\n webpage_schema = get_webpage_schema(request)\n return TemplateResponse(\n request, 'home.html', {\n 'parent': None,\n 'products': products,\n 'webpage_schema': json.dumps(webpage_schema)})\n\n\n@staff_member_required\ndef styleguide(request):\n return TemplateResponse(request, 'styleguide.html')\n\n\ndef impersonate(request, uid):\n response = orig_impersonate(request, uid)\n if request.session.modified:\n msg = pgettext_lazy(\n 'Impersonation message',\n 'You are now logged as {}'.format(User.objects.get(pk=uid)))\n messages.success(request, msg)\n return response\n\n\ndef handle_404(request, exception=None):\n return TemplateResponse(request, '404.html', status=404)\n\n\ndef manifest(request):\n return TemplateResponse(\n request, 'manifest.json', content_type='application/json')\n", "path": "saleor/core/views.py"}], "after_files": [{"content": "import json\n\nfrom django.contrib import messages\nfrom django.template.response import TemplateResponse\nfrom django.utils.translation import pgettext_lazy\nfrom impersonate.views import impersonate as orig_impersonate\n\nfrom ..account.models import User\nfrom ..dashboard.views import staff_member_required\nfrom ..product.utils import products_for_homepage\nfrom ..product.utils.availability import products_with_availability\nfrom ..seo.schema.webpage import get_webpage_schema\n\n\ndef home(request):\n products = products_for_homepage()[:8]\n products = list(products_with_availability(\n products, discounts=request.discounts, taxes=request.taxes,\n local_currency=request.currency))\n webpage_schema = get_webpage_schema(request)\n return TemplateResponse(\n request, 'home.html', {\n 'parent': None,\n 'products': products,\n 'webpage_schema': json.dumps(webpage_schema)})\n\n\n@staff_member_required\ndef styleguide(request):\n return TemplateResponse(request, 'styleguide.html')\n\n\ndef impersonate(request, uid):\n response = orig_impersonate(request, uid)\n if request.session.modified:\n msg = pgettext_lazy(\n 'Impersonation message',\n 'You are now logged as 
{}'.format(User.objects.get(pk=uid)))\n messages.success(request, msg)\n return response\n\n\ndef handle_404(request, exception=None):\n return TemplateResponse(request, '404.html', status=404)\n\n\ndef manifest(request):\n return TemplateResponse(\n request, 'manifest.json', content_type='application/json')\n", "path": "saleor/core/views.py"}]}
num_tokens: 728
num_tokens_diff: 133
problem_id: gh_patches_debug_4222
source: rasdani/github-patches
task_type: git_diff
in_source_id: learningequality__kolibri-8449
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Single user syncing - Lesson completion is not restored after second setup of the same learner account ## Observed behavior I have setup a learner-only device and have completed a lesson assigned to the learner user which is then synced correctly to the server. After that I've deleted the android app storage and cache and repeated the setup for the same user expecting the lesson progress to be restored while in reality it does not get restored and the learner has to start over. ## Expected behavior Any synced user data should be restored. ## Steps to reproduce the issue 1. Install this Windows [build](https://buildkite.com/learningequality/kolibri-windows/builds/1930) and this Android [build](https://buildkite.com/learningequality/kolibri-android-installer/builds/2692) 2. Setup the Windows app as a server and setup a learner device on Android. 3. As a learner complete an assigned lesson. 4. As an admin go to Coach>Reports>Lessons and verify that the lesson progress is synced correctly. 5. Delete the cache and storage of the android app and setup the same learner user again. Observe that there is no indication for the progress made so far. ## Additional information Admin: ![2021-09-16_12-39-50](https://user-images.githubusercontent.com/79847249/133593574-4c5124df-d5d8-421c-8d79-9e36c04c8fe0.png) Learner with completed lesson: ![2021-09-16_12-38-33](https://user-images.githubusercontent.com/79847249/133593661-6cf00ade-1a61-45f6-93d7-cea75eda7203.png) Restored learner device without any indication of the progress made: ![2021-09-16_12-49-17](https://user-images.githubusercontent.com/79847249/133593883-a6232fdc-10af-46c0-a1ed-d4b566ee94b8.png) ## Windows Logs [logs.zip](https://github.com/learningequality/kolibri/files/7176855/logs.zip) ## Android and Ubuntu Logs and DB files https://drive.google.com/file/d/1RAZG77NHuX92puj-KxA-GtkSpx3JxjyZ/view?usp=sharing ## Usage Details - OS: Windows 10, Android 11 - Browser: Chrome Single user syncing - Lesson completion is not restored after second setup of the same learner account ## Observed behavior I have setup a learner-only device and have completed a lesson assigned to the learner user which is then synced correctly to the server. After that I've deleted the android app storage and cache and repeated the setup for the same user expecting the lesson progress to be restored while in reality it does not get restored and the learner has to start over. ## Expected behavior Any synced user data should be restored. ## Steps to reproduce the issue 1. Install this Windows [build](https://buildkite.com/learningequality/kolibri-windows/builds/1930) and this Android [build](https://buildkite.com/learningequality/kolibri-android-installer/builds/2692) 2. Setup the Windows app as a server and setup a learner device on Android. 3. As a learner complete an assigned lesson. 4. As an admin go to Coach>Reports>Lessons and verify that the lesson progress is synced correctly. 5. Delete the cache and storage of the android app and setup the same learner user again. Observe that there is no indication for the progress made so far. 
## Additional information Admin: ![2021-09-16_12-39-50](https://user-images.githubusercontent.com/79847249/133593574-4c5124df-d5d8-421c-8d79-9e36c04c8fe0.png) Learner with completed lesson: ![2021-09-16_12-38-33](https://user-images.githubusercontent.com/79847249/133593661-6cf00ade-1a61-45f6-93d7-cea75eda7203.png) Restored learner device without any indication of the progress made: ![2021-09-16_12-49-17](https://user-images.githubusercontent.com/79847249/133593883-a6232fdc-10af-46c0-a1ed-d4b566ee94b8.png) ## Windows Logs [logs.zip](https://github.com/learningequality/kolibri/files/7176855/logs.zip) ## Android and Ubuntu Logs and DB files https://drive.google.com/file/d/1RAZG77NHuX92puj-KxA-GtkSpx3JxjyZ/view?usp=sharing ## Usage Details - OS: Windows 10, Android 11 - Browser: Chrome --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kolibri/core/exams/kolibri_plugin.py` Content: ``` 1 # To reinstate the original functionality, please remove this header comment 2 # and uncomment the code below 3 # from .single_user_assignment_utils import ( 4 # update_assignments_from_individual_syncable_exams, 5 # ) 6 # from .single_user_assignment_utils import ( 7 # update_individual_syncable_exams_from_assignments, 8 # ) 9 # from kolibri.core.auth.hooks import FacilityDataSyncHook 10 # from kolibri.plugins.hooks import register_hook 11 # @register_hook 12 # class SingleUserExamSyncHook(FacilityDataSyncHook): 13 # def pre_transfer( 14 # self, 15 # dataset_id, 16 # local_is_single_user, 17 # remote_is_single_user, 18 # single_user_id, 19 # context, 20 # ): 21 # # if we're about to send data to a single-user device, prep the syncable exam assignments 22 # if context.is_producer and remote_is_single_user: 23 # update_individual_syncable_exams_from_assignments(single_user_id) 24 # def post_transfer( 25 # self, 26 # dataset_id, 27 # local_is_single_user, 28 # remote_is_single_user, 29 # single_user_id, 30 # context, 31 # ): 32 # # if we've just received data on a single-user device, update the exams and assignments 33 # if context.is_receiver and local_is_single_user: 34 # update_assignments_from_individual_syncable_exams(single_user_id) 35 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kolibri/core/exams/kolibri_plugin.py b/kolibri/core/exams/kolibri_plugin.py --- a/kolibri/core/exams/kolibri_plugin.py +++ b/kolibri/core/exams/kolibri_plugin.py @@ -1,5 +1,5 @@ # To reinstate the original functionality, please remove this header comment -# and uncomment the code below +# and uncomment the code below (and re-enable exams in test_single_user_assignment_sync, search for "TODO") # from .single_user_assignment_utils import ( # update_assignments_from_individual_syncable_exams, # )
{"golden_diff": "diff --git a/kolibri/core/exams/kolibri_plugin.py b/kolibri/core/exams/kolibri_plugin.py\n--- a/kolibri/core/exams/kolibri_plugin.py\n+++ b/kolibri/core/exams/kolibri_plugin.py\n@@ -1,5 +1,5 @@\n # To reinstate the original functionality, please remove this header comment\n-# and uncomment the code below\n+# and uncomment the code below (and re-enable exams in test_single_user_assignment_sync, search for \"TODO\")\n # from .single_user_assignment_utils import (\n # update_assignments_from_individual_syncable_exams,\n # )\n", "issue": "Single user syncing - Lesson completion is not restored after second setup of the same learner account\n## Observed behavior\r\nI have setup a learner-only device and have completed a lesson assigned to the learner user which is then synced correctly to the server. After that I've deleted the android app storage and cache and repeated the setup for the same user expecting the lesson progress to be restored while in reality it does not get restored and the learner has to start over.\r\n\r\n## Expected behavior\r\nAny synced user data should be restored.\r\n\r\n## Steps to reproduce the issue\r\n1. Install this Windows [build](https://buildkite.com/learningequality/kolibri-windows/builds/1930) and this Android [build](https://buildkite.com/learningequality/kolibri-android-installer/builds/2692)\r\n2. Setup the Windows app as a server and setup a learner device on Android.\r\n3. As a learner complete an assigned lesson.\r\n4. As an admin go to Coach>Reports>Lessons and verify that the lesson progress is synced correctly.\r\n5. Delete the cache and storage of the android app and setup the same learner user again. Observe that there is no indication for the progress made so far.\r\n\r\n## Additional information\r\nAdmin:\r\n![2021-09-16_12-39-50](https://user-images.githubusercontent.com/79847249/133593574-4c5124df-d5d8-421c-8d79-9e36c04c8fe0.png)\r\n\r\nLearner with completed lesson:\r\n![2021-09-16_12-38-33](https://user-images.githubusercontent.com/79847249/133593661-6cf00ade-1a61-45f6-93d7-cea75eda7203.png)\r\n\r\nRestored learner device without any indication of the progress made:\r\n![2021-09-16_12-49-17](https://user-images.githubusercontent.com/79847249/133593883-a6232fdc-10af-46c0-a1ed-d4b566ee94b8.png)\r\n\r\n\r\n## Windows Logs\r\n[logs.zip](https://github.com/learningequality/kolibri/files/7176855/logs.zip)\r\n\r\n## Android and Ubuntu Logs and DB files\r\nhttps://drive.google.com/file/d/1RAZG77NHuX92puj-KxA-GtkSpx3JxjyZ/view?usp=sharing\r\n\r\n## Usage Details\r\n - OS: Windows 10, Android 11\r\n - Browser: Chrome\nSingle user syncing - Lesson completion is not restored after second setup of the same learner account\n## Observed behavior\r\nI have setup a learner-only device and have completed a lesson assigned to the learner user which is then synced correctly to the server. After that I've deleted the android app storage and cache and repeated the setup for the same user expecting the lesson progress to be restored while in reality it does not get restored and the learner has to start over.\r\n\r\n## Expected behavior\r\nAny synced user data should be restored.\r\n\r\n## Steps to reproduce the issue\r\n1. Install this Windows [build](https://buildkite.com/learningequality/kolibri-windows/builds/1930) and this Android [build](https://buildkite.com/learningequality/kolibri-android-installer/builds/2692)\r\n2. Setup the Windows app as a server and setup a learner device on Android.\r\n3. As a learner complete an assigned lesson.\r\n4. 
As an admin go to Coach>Reports>Lessons and verify that the lesson progress is synced correctly.\r\n5. Delete the cache and storage of the android app and setup the same learner user again. Observe that there is no indication for the progress made so far.\r\n\r\n## Additional information\r\nAdmin:\r\n![2021-09-16_12-39-50](https://user-images.githubusercontent.com/79847249/133593574-4c5124df-d5d8-421c-8d79-9e36c04c8fe0.png)\r\n\r\nLearner with completed lesson:\r\n![2021-09-16_12-38-33](https://user-images.githubusercontent.com/79847249/133593661-6cf00ade-1a61-45f6-93d7-cea75eda7203.png)\r\n\r\nRestored learner device without any indication of the progress made:\r\n![2021-09-16_12-49-17](https://user-images.githubusercontent.com/79847249/133593883-a6232fdc-10af-46c0-a1ed-d4b566ee94b8.png)\r\n\r\n\r\n## Windows Logs\r\n[logs.zip](https://github.com/learningequality/kolibri/files/7176855/logs.zip)\r\n\r\n## Android and Ubuntu Logs and DB files\r\nhttps://drive.google.com/file/d/1RAZG77NHuX92puj-KxA-GtkSpx3JxjyZ/view?usp=sharing\r\n\r\n## Usage Details\r\n - OS: Windows 10, Android 11\r\n - Browser: Chrome\n", "before_files": [{"content": "# To reinstate the original functionality, please remove this header comment\n# and uncomment the code below\n# from .single_user_assignment_utils import (\n# update_assignments_from_individual_syncable_exams,\n# )\n# from .single_user_assignment_utils import (\n# update_individual_syncable_exams_from_assignments,\n# )\n# from kolibri.core.auth.hooks import FacilityDataSyncHook\n# from kolibri.plugins.hooks import register_hook\n# @register_hook\n# class SingleUserExamSyncHook(FacilityDataSyncHook):\n# def pre_transfer(\n# self,\n# dataset_id,\n# local_is_single_user,\n# remote_is_single_user,\n# single_user_id,\n# context,\n# ):\n# # if we're about to send data to a single-user device, prep the syncable exam assignments\n# if context.is_producer and remote_is_single_user:\n# update_individual_syncable_exams_from_assignments(single_user_id)\n# def post_transfer(\n# self,\n# dataset_id,\n# local_is_single_user,\n# remote_is_single_user,\n# single_user_id,\n# context,\n# ):\n# # if we've just received data on a single-user device, update the exams and assignments\n# if context.is_receiver and local_is_single_user:\n# update_assignments_from_individual_syncable_exams(single_user_id)\n", "path": "kolibri/core/exams/kolibri_plugin.py"}], "after_files": [{"content": "# To reinstate the original functionality, please remove this header comment\n# and uncomment the code below (and re-enable exams in test_single_user_assignment_sync, search for \"TODO\")\n# from .single_user_assignment_utils import (\n# update_assignments_from_individual_syncable_exams,\n# )\n# from .single_user_assignment_utils import (\n# update_individual_syncable_exams_from_assignments,\n# )\n# from kolibri.core.auth.hooks import FacilityDataSyncHook\n# from kolibri.plugins.hooks import register_hook\n# @register_hook\n# class SingleUserExamSyncHook(FacilityDataSyncHook):\n# def pre_transfer(\n# self,\n# dataset_id,\n# local_is_single_user,\n# remote_is_single_user,\n# single_user_id,\n# context,\n# ):\n# # if we're about to send data to a single-user device, prep the syncable exam assignments\n# if context.is_producer and remote_is_single_user:\n# update_individual_syncable_exams_from_assignments(single_user_id)\n# def post_transfer(\n# self,\n# dataset_id,\n# local_is_single_user,\n# remote_is_single_user,\n# single_user_id,\n# context,\n# ):\n# # if we've just received data on a single-user 
device, update the exams and assignments\n# if context.is_receiver and local_is_single_user:\n# update_assignments_from_individual_syncable_exams(single_user_id)\n", "path": "kolibri/core/exams/kolibri_plugin.py"}]}
1,845
134
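The Kolibri record above re-enables (via comments) a pair of facility-sync hooks: the producing side materializes per-learner syncable exam rows before transfer, and the receiving side rebuilds assignments afterwards. A minimal runnable sketch of that pre/post-transfer control flow, where `SyncContext` and the two `update_*` functions are hypothetical stand-ins for Kolibri's internals, not the real APIs:

```python
from dataclasses import dataclass

@dataclass
class SyncContext:
    is_producer: bool  # True on the side that is sending data
    is_receiver: bool  # True on the side that is receiving data

def update_individual_syncable_exams_from_assignments(user_id):
    print(f"producer: materializing syncable exam rows for {user_id}")

def update_assignments_from_individual_syncable_exams(user_id):
    print(f"receiver: rebuilding exam assignments for {user_id}")

class SingleUserExamSyncHook:
    def pre_transfer(self, remote_is_single_user, single_user_id, context):
        # Before sending to a single-user device, prep the syncable rows.
        if context.is_producer and remote_is_single_user:
            update_individual_syncable_exams_from_assignments(single_user_id)

    def post_transfer(self, local_is_single_user, single_user_id, context):
        # After receiving on a single-user device, rebuild the assignments.
        if context.is_receiver and local_is_single_user:
            update_assignments_from_individual_syncable_exams(single_user_id)

hook = SingleUserExamSyncHook()
hook.pre_transfer(True, "learner-1", SyncContext(is_producer=True, is_receiver=False))
hook.post_transfer(True, "learner-1", SyncContext(is_producer=False, is_receiver=True))
```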
gh_patches_debug_1146
rasdani/github-patches
git_diff
locustio__locust-1760
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Locust stopped working after Flast 2.0 got released in setup.py I can see: ` "flask>=1.1.2", ` I guess it should be hardcoded to ==1.1.2 for now. it crashes with: ``` File "/root/.local/share/virtualenvs/xxxxxxx/lib/python3.6/site-packages/locust/web.py", line 102, in __init__ app.jinja_options["extensions"].append("jinja2.ext.do") KeyError: 'extensions' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import ast 3 import os 4 import re 5 import sys 6 7 from setuptools import find_packages, setup 8 9 ROOT_PATH = os.path.abspath(os.path.dirname(__file__)) 10 11 # parse version from locust/__init__.py 12 _version_re = re.compile(r"__version__\s+=\s+(.*)") 13 _init_file = os.path.join(ROOT_PATH, "locust", "__init__.py") 14 with open(_init_file, "rb") as f: 15 version = str(ast.literal_eval(_version_re.search(f.read().decode("utf-8")).group(1))) 16 17 setup( 18 name="locust", 19 version=version, 20 install_requires=[ 21 "gevent>=20.9.0", 22 "flask>=1.1.2", 23 "Werkzeug>=1.0.1", 24 "requests>=2.9.1", 25 "msgpack>=0.6.2", 26 "pyzmq>=16.0.2", 27 "geventhttpclient>=1.4.4", 28 "ConfigArgParse>=1.0", 29 "psutil>=5.6.7", 30 "Flask-BasicAuth>=0.2.0", 31 ], 32 test_suite="locust.test", 33 tests_require=[ 34 "cryptography", 35 "mock", 36 "pyquery", 37 ], 38 extras_require={ 39 ":sys_platform == 'win32'": ["pywin32"], 40 }, 41 ) 42 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -19,7 +19,7 @@ version=version, install_requires=[ "gevent>=20.9.0", - "flask>=1.1.2", + "flask==1.1.2", "Werkzeug>=1.0.1", "requests>=2.9.1", "msgpack>=0.6.2",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -19,7 +19,7 @@\n version=version,\n install_requires=[\n \"gevent>=20.9.0\",\n- \"flask>=1.1.2\",\n+ \"flask==1.1.2\",\n \"Werkzeug>=1.0.1\",\n \"requests>=2.9.1\",\n \"msgpack>=0.6.2\",\n", "issue": "Locust stopped working after Flast 2.0 got released\nin setup.py I can see:\r\n` \"flask>=1.1.2\", `\r\nI guess it should be hardcoded to ==1.1.2 for now.\r\n\r\nit crashes with:\r\n```\r\nFile \"/root/.local/share/virtualenvs/xxxxxxx/lib/python3.6/site-packages/locust/web.py\", line 102, in __init__\r\napp.jinja_options[\"extensions\"].append(\"jinja2.ext.do\")\r\nKeyError: 'extensions'\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport ast\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nROOT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n# parse version from locust/__init__.py\n_version_re = re.compile(r\"__version__\\s+=\\s+(.*)\")\n_init_file = os.path.join(ROOT_PATH, \"locust\", \"__init__.py\")\nwith open(_init_file, \"rb\") as f:\n version = str(ast.literal_eval(_version_re.search(f.read().decode(\"utf-8\")).group(1)))\n\nsetup(\n name=\"locust\",\n version=version,\n install_requires=[\n \"gevent>=20.9.0\",\n \"flask>=1.1.2\",\n \"Werkzeug>=1.0.1\",\n \"requests>=2.9.1\",\n \"msgpack>=0.6.2\",\n \"pyzmq>=16.0.2\",\n \"geventhttpclient>=1.4.4\",\n \"ConfigArgParse>=1.0\",\n \"psutil>=5.6.7\",\n \"Flask-BasicAuth>=0.2.0\",\n ],\n test_suite=\"locust.test\",\n tests_require=[\n \"cryptography\",\n \"mock\",\n \"pyquery\",\n ],\n extras_require={\n \":sys_platform == 'win32'\": [\"pywin32\"],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport ast\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nROOT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n# parse version from locust/__init__.py\n_version_re = re.compile(r\"__version__\\s+=\\s+(.*)\")\n_init_file = os.path.join(ROOT_PATH, \"locust\", \"__init__.py\")\nwith open(_init_file, \"rb\") as f:\n version = str(ast.literal_eval(_version_re.search(f.read().decode(\"utf-8\")).group(1)))\n\nsetup(\n name=\"locust\",\n version=version,\n install_requires=[\n \"gevent>=20.9.0\",\n \"flask==1.1.2\",\n \"Werkzeug>=1.0.1\",\n \"requests>=2.9.1\",\n \"msgpack>=0.6.2\",\n \"pyzmq>=16.0.2\",\n \"geventhttpclient>=1.4.4\",\n \"ConfigArgParse>=1.0\",\n \"psutil>=5.6.7\",\n \"Flask-BasicAuth>=0.2.0\",\n ],\n test_suite=\"locust.test\",\n tests_require=[\n \"cryptography\",\n \"mock\",\n \"pyquery\",\n ],\n extras_require={\n \":sys_platform == 'win32'\": [\"pywin32\"],\n },\n)\n", "path": "setup.py"}]}
763
110
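The traceback in the locust record comes from Flask 2.0 no longer pre-populating `app.jinja_options` with an `"extensions"` key, so subscripting before appending raises `KeyError`; pinning `flask==1.1.2` sidesteps that. A pure-Python sketch of the failure mode and a version-tolerant alternative — plain dicts stand in for the Flask attribute here:

```python
# Plain dicts standing in for flask.Flask().jinja_options across versions.
jinja_options_flask1 = {"extensions": ["jinja2.ext.autoescape", "jinja2.ext.with_"]}
jinja_options_flask2 = {}  # Flask 2.0 no longer pre-populates this dict

def add_do_extension_fragile(options):
    # The pattern from the traceback: KeyError on a Flask 2.0-style dict.
    options["extensions"].append("jinja2.ext.do")

def add_do_extension_tolerant(options):
    # setdefault works whether or not the key already exists.
    options.setdefault("extensions", []).append("jinja2.ext.do")

add_do_extension_tolerant(jinja_options_flask1)
add_do_extension_tolerant(jinja_options_flask2)
assert "jinja2.ext.do" in jinja_options_flask2["extensions"]

try:
    add_do_extension_fragile({})
except KeyError as exc:
    print(f"reproduces the reported crash: KeyError: {exc}")
```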
gh_patches_debug_33387
rasdani/github-patches
git_diff
scrapy__scrapy-791
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- _nons function is not used in scrapy.contrib.linkextractors.lxmlhtml There is either a bug or some stray code in https://github.com/scrapy/scrapy/blob/master/scrapy/contrib/linkextractors/lxmlhtml.py#L37: `tag = _nons(el.tag)` local variable is not used, and so `_nons` function is also unused. @redapple - what was the intended behavior? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scrapy/contrib/linkextractors/lxmlhtml.py` Content: ``` 1 """ 2 Link extractor based on lxml.html 3 """ 4 5 import re 6 from urlparse import urlparse, urljoin 7 8 import lxml.etree as etree 9 10 from scrapy.selector import Selector 11 from scrapy.link import Link 12 from scrapy.utils.misc import arg_to_iter 13 from scrapy.utils.python import unique as unique_list, str_to_unicode 14 from scrapy.linkextractor import FilteringLinkExtractor 15 from scrapy.utils.response import get_base_url 16 17 18 # from lxml/src/lxml/html/__init__.py 19 XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml" 20 21 _collect_string_content = etree.XPath("string()") 22 23 def _nons(tag): 24 if isinstance(tag, basestring): 25 if tag[0] == '{' and tag[1:len(XHTML_NAMESPACE)+1] == XHTML_NAMESPACE: 26 return tag.split('}')[-1] 27 return tag 28 29 30 class LxmlParserLinkExtractor(object): 31 def __init__(self, tag="a", attr="href", process=None, unique=False): 32 self.scan_tag = tag if callable(tag) else lambda t: t == tag 33 self.scan_attr = attr if callable(attr) else lambda a: a == attr 34 self.process_attr = process if callable(process) else lambda v: v 35 self.unique = unique 36 37 def _iter_links(self, document): 38 for el in document.iter(etree.Element): 39 tag = _nons(el.tag) 40 if not self.scan_tag(el.tag): 41 continue 42 attribs = el.attrib 43 for attrib in attribs: 44 yield (el, attrib, attribs[attrib]) 45 46 def _extract_links(self, selector, response_url, response_encoding, base_url): 47 links = [] 48 # hacky way to get the underlying lxml parsed document 49 for el, attr, attr_val in self._iter_links(selector._root): 50 if self.scan_tag(el.tag) and self.scan_attr(attr): 51 # pseudo _root.make_links_absolute(base_url) 52 attr_val = urljoin(base_url, attr_val) 53 url = self.process_attr(attr_val) 54 if url is None: 55 continue 56 if isinstance(url, unicode): 57 url = url.encode(response_encoding) 58 # to fix relative links after process_value 59 url = urljoin(response_url, url) 60 link = Link(url, _collect_string_content(el) or u'', 61 nofollow=True if el.get('rel') == 'nofollow' else False) 62 links.append(link) 63 64 return unique_list(links, key=lambda link: link.url) \ 65 if self.unique else links 66 67 def extract_links(self, response): 68 html = Selector(response) 69 base_url = get_base_url(response) 70 return self._extract_links(html, response.url, response.encoding, base_url) 71 72 def _process_links(self, links): 73 """ Normalize and filter extracted links 74 75 The subclass should override it if neccessary 76 """ 77 links = unique_list(links, key=lambda link: link.url) if self.unique else links 78 return links 79 80 81 class LxmlLinkExtractor(FilteringLinkExtractor): 82 83 def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(), 84 tags=('a', 'area'), attrs=('href',), canonicalize=True, unique=True, process_value=None, 85 deny_extensions=None): 86 tags, attrs = 
set(arg_to_iter(tags)), set(arg_to_iter(attrs)) 87 tag_func = lambda x: x in tags 88 attr_func = lambda x: x in attrs 89 lx = LxmlParserLinkExtractor(tag=tag_func, attr=attr_func, 90 unique=unique, process=process_value) 91 92 super(LxmlLinkExtractor, self).__init__(lx, allow, deny, 93 allow_domains, deny_domains, restrict_xpaths, canonicalize, 94 deny_extensions) 95 96 def extract_links(self, response): 97 html = Selector(response) 98 base_url = get_base_url(response) 99 if self.restrict_xpaths: 100 docs = [subdoc 101 for x in self.restrict_xpaths 102 for subdoc in html.xpath(x)] 103 else: 104 docs = [html] 105 all_links = [] 106 for doc in docs: 107 links = self._extract_links(doc, response.url, response.encoding, base_url) 108 all_links.extend(self._process_links(links)) 109 return unique_list(all_links) 110 111 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scrapy/contrib/linkextractors/lxmlhtml.py b/scrapy/contrib/linkextractors/lxmlhtml.py --- a/scrapy/contrib/linkextractors/lxmlhtml.py +++ b/scrapy/contrib/linkextractors/lxmlhtml.py @@ -36,30 +36,30 @@ def _iter_links(self, document): for el in document.iter(etree.Element): - tag = _nons(el.tag) - if not self.scan_tag(el.tag): + if not self.scan_tag(_nons(el.tag)): continue attribs = el.attrib for attrib in attribs: + if not self.scan_attr(attrib): + continue yield (el, attrib, attribs[attrib]) def _extract_links(self, selector, response_url, response_encoding, base_url): links = [] # hacky way to get the underlying lxml parsed document for el, attr, attr_val in self._iter_links(selector._root): - if self.scan_tag(el.tag) and self.scan_attr(attr): - # pseudo _root.make_links_absolute(base_url) - attr_val = urljoin(base_url, attr_val) - url = self.process_attr(attr_val) - if url is None: - continue - if isinstance(url, unicode): - url = url.encode(response_encoding) - # to fix relative links after process_value - url = urljoin(response_url, url) - link = Link(url, _collect_string_content(el) or u'', - nofollow=True if el.get('rel') == 'nofollow' else False) - links.append(link) + # pseudo lxml.html.HtmlElement.make_links_absolute(base_url) + attr_val = urljoin(base_url, attr_val) + url = self.process_attr(attr_val) + if url is None: + continue + if isinstance(url, unicode): + url = url.encode(response_encoding) + # to fix relative links after process_value + url = urljoin(response_url, url) + link = Link(url, _collect_string_content(el) or u'', + nofollow=True if el.get('rel') == 'nofollow' else False) + links.append(link) return unique_list(links, key=lambda link: link.url) \ if self.unique else links
{"golden_diff": "diff --git a/scrapy/contrib/linkextractors/lxmlhtml.py b/scrapy/contrib/linkextractors/lxmlhtml.py\n--- a/scrapy/contrib/linkextractors/lxmlhtml.py\n+++ b/scrapy/contrib/linkextractors/lxmlhtml.py\n@@ -36,30 +36,30 @@\n \n def _iter_links(self, document):\n for el in document.iter(etree.Element):\n- tag = _nons(el.tag)\n- if not self.scan_tag(el.tag):\n+ if not self.scan_tag(_nons(el.tag)):\n continue\n attribs = el.attrib\n for attrib in attribs:\n+ if not self.scan_attr(attrib):\n+ continue\n yield (el, attrib, attribs[attrib])\n \n def _extract_links(self, selector, response_url, response_encoding, base_url):\n links = []\n # hacky way to get the underlying lxml parsed document\n for el, attr, attr_val in self._iter_links(selector._root):\n- if self.scan_tag(el.tag) and self.scan_attr(attr):\n- # pseudo _root.make_links_absolute(base_url)\n- attr_val = urljoin(base_url, attr_val)\n- url = self.process_attr(attr_val)\n- if url is None:\n- continue\n- if isinstance(url, unicode):\n- url = url.encode(response_encoding)\n- # to fix relative links after process_value\n- url = urljoin(response_url, url)\n- link = Link(url, _collect_string_content(el) or u'',\n- nofollow=True if el.get('rel') == 'nofollow' else False)\n- links.append(link)\n+ # pseudo lxml.html.HtmlElement.make_links_absolute(base_url)\n+ attr_val = urljoin(base_url, attr_val)\n+ url = self.process_attr(attr_val)\n+ if url is None:\n+ continue\n+ if isinstance(url, unicode):\n+ url = url.encode(response_encoding)\n+ # to fix relative links after process_value\n+ url = urljoin(response_url, url)\n+ link = Link(url, _collect_string_content(el) or u'',\n+ nofollow=True if el.get('rel') == 'nofollow' else False)\n+ links.append(link)\n \n return unique_list(links, key=lambda link: link.url) \\\n if self.unique else links\n", "issue": "_nons function is not used in scrapy.contrib.linkextractors.lxmlhtml\nThere is either a bug or some stray code in https://github.com/scrapy/scrapy/blob/master/scrapy/contrib/linkextractors/lxmlhtml.py#L37: `tag = _nons(el.tag)` local variable is not used, and so `_nons` function is also unused. 
@redapple - what was the intended behavior?\n\n", "before_files": [{"content": "\"\"\"\nLink extractor based on lxml.html\n\"\"\"\n\nimport re\nfrom urlparse import urlparse, urljoin\n\nimport lxml.etree as etree\n\nfrom scrapy.selector import Selector\nfrom scrapy.link import Link\nfrom scrapy.utils.misc import arg_to_iter\nfrom scrapy.utils.python import unique as unique_list, str_to_unicode\nfrom scrapy.linkextractor import FilteringLinkExtractor\nfrom scrapy.utils.response import get_base_url\n\n\n# from lxml/src/lxml/html/__init__.py\nXHTML_NAMESPACE = \"http://www.w3.org/1999/xhtml\"\n\n_collect_string_content = etree.XPath(\"string()\")\n\ndef _nons(tag):\n if isinstance(tag, basestring):\n if tag[0] == '{' and tag[1:len(XHTML_NAMESPACE)+1] == XHTML_NAMESPACE:\n return tag.split('}')[-1]\n return tag\n\n\nclass LxmlParserLinkExtractor(object):\n def __init__(self, tag=\"a\", attr=\"href\", process=None, unique=False):\n self.scan_tag = tag if callable(tag) else lambda t: t == tag\n self.scan_attr = attr if callable(attr) else lambda a: a == attr\n self.process_attr = process if callable(process) else lambda v: v\n self.unique = unique\n\n def _iter_links(self, document):\n for el in document.iter(etree.Element):\n tag = _nons(el.tag)\n if not self.scan_tag(el.tag):\n continue\n attribs = el.attrib\n for attrib in attribs:\n yield (el, attrib, attribs[attrib])\n\n def _extract_links(self, selector, response_url, response_encoding, base_url):\n links = []\n # hacky way to get the underlying lxml parsed document\n for el, attr, attr_val in self._iter_links(selector._root):\n if self.scan_tag(el.tag) and self.scan_attr(attr):\n # pseudo _root.make_links_absolute(base_url)\n attr_val = urljoin(base_url, attr_val)\n url = self.process_attr(attr_val)\n if url is None:\n continue\n if isinstance(url, unicode):\n url = url.encode(response_encoding)\n # to fix relative links after process_value\n url = urljoin(response_url, url)\n link = Link(url, _collect_string_content(el) or u'',\n nofollow=True if el.get('rel') == 'nofollow' else False)\n links.append(link)\n\n return unique_list(links, key=lambda link: link.url) \\\n if self.unique else links\n\n def extract_links(self, response):\n html = Selector(response)\n base_url = get_base_url(response)\n return self._extract_links(html, response.url, response.encoding, base_url)\n\n def _process_links(self, links):\n \"\"\" Normalize and filter extracted links\n\n The subclass should override it if neccessary\n \"\"\"\n links = unique_list(links, key=lambda link: link.url) if self.unique else links\n return links\n\n\nclass LxmlLinkExtractor(FilteringLinkExtractor):\n\n def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(),\n tags=('a', 'area'), attrs=('href',), canonicalize=True, unique=True, process_value=None,\n deny_extensions=None):\n tags, attrs = set(arg_to_iter(tags)), set(arg_to_iter(attrs))\n tag_func = lambda x: x in tags\n attr_func = lambda x: x in attrs\n lx = LxmlParserLinkExtractor(tag=tag_func, attr=attr_func,\n unique=unique, process=process_value)\n\n super(LxmlLinkExtractor, self).__init__(lx, allow, deny,\n allow_domains, deny_domains, restrict_xpaths, canonicalize,\n deny_extensions)\n\n def extract_links(self, response):\n html = Selector(response)\n base_url = get_base_url(response)\n if self.restrict_xpaths:\n docs = [subdoc\n for x in self.restrict_xpaths\n for subdoc in html.xpath(x)]\n else:\n docs = [html]\n all_links = []\n for doc in docs:\n links = self._extract_links(doc, 
response.url, response.encoding, base_url)\n all_links.extend(self._process_links(links))\n return unique_list(all_links)\n\n", "path": "scrapy/contrib/linkextractors/lxmlhtml.py"}], "after_files": [{"content": "\"\"\"\nLink extractor based on lxml.html\n\"\"\"\n\nimport re\nfrom urlparse import urlparse, urljoin\n\nimport lxml.etree as etree\n\nfrom scrapy.selector import Selector\nfrom scrapy.link import Link\nfrom scrapy.utils.misc import arg_to_iter\nfrom scrapy.utils.python import unique as unique_list, str_to_unicode\nfrom scrapy.linkextractor import FilteringLinkExtractor\nfrom scrapy.utils.response import get_base_url\n\n\n# from lxml/src/lxml/html/__init__.py\nXHTML_NAMESPACE = \"http://www.w3.org/1999/xhtml\"\n\n_collect_string_content = etree.XPath(\"string()\")\n\ndef _nons(tag):\n if isinstance(tag, basestring):\n if tag[0] == '{' and tag[1:len(XHTML_NAMESPACE)+1] == XHTML_NAMESPACE:\n return tag.split('}')[-1]\n return tag\n\n\nclass LxmlParserLinkExtractor(object):\n def __init__(self, tag=\"a\", attr=\"href\", process=None, unique=False):\n self.scan_tag = tag if callable(tag) else lambda t: t == tag\n self.scan_attr = attr if callable(attr) else lambda a: a == attr\n self.process_attr = process if callable(process) else lambda v: v\n self.unique = unique\n\n def _iter_links(self, document):\n for el in document.iter(etree.Element):\n if not self.scan_tag(_nons(el.tag)):\n continue\n attribs = el.attrib\n for attrib in attribs:\n if not self.scan_attr(attrib):\n continue\n yield (el, attrib, attribs[attrib])\n\n def _extract_links(self, selector, response_url, response_encoding, base_url):\n links = []\n # hacky way to get the underlying lxml parsed document\n for el, attr, attr_val in self._iter_links(selector._root):\n # pseudo lxml.html.HtmlElement.make_links_absolute(base_url)\n attr_val = urljoin(base_url, attr_val)\n url = self.process_attr(attr_val)\n if url is None:\n continue\n if isinstance(url, unicode):\n url = url.encode(response_encoding)\n # to fix relative links after process_value\n url = urljoin(response_url, url)\n link = Link(url, _collect_string_content(el) or u'',\n nofollow=True if el.get('rel') == 'nofollow' else False)\n links.append(link)\n\n return unique_list(links, key=lambda link: link.url) \\\n if self.unique else links\n\n def extract_links(self, response):\n html = Selector(response)\n base_url = get_base_url(response)\n return self._extract_links(html, response.url, response.encoding, base_url)\n\n def _process_links(self, links):\n \"\"\" Normalize and filter extracted links\n\n The subclass should override it if neccessary\n \"\"\"\n links = unique_list(links, key=lambda link: link.url) if self.unique else links\n return links\n\n\nclass LxmlLinkExtractor(FilteringLinkExtractor):\n\n def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(),\n tags=('a', 'area'), attrs=('href',), canonicalize=True, unique=True, process_value=None,\n deny_extensions=None):\n tags, attrs = set(arg_to_iter(tags)), set(arg_to_iter(attrs))\n tag_func = lambda x: x in tags\n attr_func = lambda x: x in attrs\n lx = LxmlParserLinkExtractor(tag=tag_func, attr=attr_func,\n unique=unique, process=process_value)\n\n super(LxmlLinkExtractor, self).__init__(lx, allow, deny,\n allow_domains, deny_domains, restrict_xpaths, canonicalize,\n deny_extensions)\n\n def extract_links(self, response):\n html = Selector(response)\n base_url = get_base_url(response)\n if self.restrict_xpaths:\n docs = [subdoc\n for x in self.restrict_xpaths\n for 
subdoc in html.xpath(x)]\n else:\n docs = [html]\n all_links = []\n for doc in docs:\n links = self._extract_links(doc, response.url, response.encoding, base_url)\n all_links.extend(self._process_links(links))\n return unique_list(all_links)\n\n", "path": "scrapy/contrib/linkextractors/lxmlhtml.py"}]}
1,497
510
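The scrapy fix matters for XHTML input, where lxml reports tags in Clark notation (`{http://www.w3.org/1999/xhtml}a`), so filtering on the raw `el.tag` never matches a plain `"a"`. A self-contained sketch of the namespace-stripping step, using the standard library's `xml.etree.ElementTree` instead of lxml so it runs anywhere:

```python
import xml.etree.ElementTree as ET

XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"

def _nons(tag):
    # Strip the XHTML namespace from a Clark-notation tag like "{ns}a".
    if isinstance(tag, str) and tag.startswith("{" + XHTML_NAMESPACE + "}"):
        return tag.split("}")[-1]
    return tag

doc = ET.fromstring(
    '<html xmlns="http://www.w3.org/1999/xhtml">'
    '<body><a href="/next">next</a></body></html>'
)

scan_tag = lambda t: t == "a"
scan_attr = lambda a: a == "href"

links = [
    el.attrib[attr]
    for el in doc.iter()
    if scan_tag(_nons(el.tag))  # raw el.tag is "{...}a" and would never match
    for attr in el.attrib
    if scan_attr(attr)
]
assert links == ["/next"], links
print(links)
```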
gh_patches_debug_4157
rasdani/github-patches
git_diff
GPflow__GPflow-648
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Reproducibility using seeds Been running some experiments, and it seems I cannot get the same reproducible result even when setting numpy and tensorflow seeds at the top of the script like ``` np.random.seed(72) tf.set_random_seed(72) ``` Naturally, either I'm setting them wrong or I'm missing a RNG. I call Param.randomize() and Model.optimize(), and they seem to be the only sources of randomness at the moment (and I'm not even sure about the latter) so one of them is likely the culprit. Is there any way to either set the seed appropriately without messing with the source code or maybe adding an option allowing seeds to be set in the future? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gpflow/training/optimizer.py` Content: ``` 1 # Copyright 2017 Artem Artemev @awav 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 # pylint: disable=no-self-use 16 # pylint: disable=too-few-public-methods 17 18 import abc 19 20 21 class Optimizer: 22 @abc.abstractmethod 23 def make_optimize_tensor(self, model, session=None, var_list=None, **kwargs): 24 """ 25 Make optimization tensor. 26 The `make_optimize_tensor` method builds optimization tensor and initializes 27 all necessary variables created by optimizer. 28 29 :param model: GPflow model. 30 :param session: Tensorflow session. 31 :param var_list: List of variables for training. 32 :param kwargs: Dictionary of extra parameters necessary for building 33 optimizer tensor. 34 :return: Tensorflow optimization tensor or operation. 35 """ 36 pass 37 38 @abc.abstractmethod 39 def minimize(self, model, session=None, var_list=None, feed_dict=None, 40 maxiter=1000, initialize=True, anchor=True, **kwargs): 41 raise NotImplementedError() 42 43 @staticmethod 44 def _gen_var_list(model, var_list): 45 var_list = var_list or [] 46 return list(set(model.trainable_tensors).union(var_list)) 47 48 @staticmethod 49 def _gen_feed_dict(model, feed_dict): 50 feed_dict = feed_dict or {} 51 model_feeds = {} if model.feeds is None else model.feeds 52 feed_dict.update(model_feeds) 53 if feed_dict == {}: 54 return None 55 return feed_dict 56 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gpflow/training/optimizer.py b/gpflow/training/optimizer.py --- a/gpflow/training/optimizer.py +++ b/gpflow/training/optimizer.py @@ -43,7 +43,8 @@ @staticmethod def _gen_var_list(model, var_list): var_list = var_list or [] - return list(set(model.trainable_tensors).union(var_list)) + all_vars = list(set(model.trainable_tensors).union(var_list)) + return sorted(all_vars, key=lambda x: x.name) @staticmethod def _gen_feed_dict(model, feed_dict):
{"golden_diff": "diff --git a/gpflow/training/optimizer.py b/gpflow/training/optimizer.py\n--- a/gpflow/training/optimizer.py\n+++ b/gpflow/training/optimizer.py\n@@ -43,7 +43,8 @@\n @staticmethod\n def _gen_var_list(model, var_list):\n var_list = var_list or []\n- return list(set(model.trainable_tensors).union(var_list))\n+ all_vars = list(set(model.trainable_tensors).union(var_list))\n+ return sorted(all_vars, key=lambda x: x.name)\n \n @staticmethod\n def _gen_feed_dict(model, feed_dict):\n", "issue": "Reproducibility using seeds\nBeen running some experiments, and it seems I cannot get the same reproducible result even when setting numpy and tensorflow seeds at the top of the script like\r\n\r\n```\r\nnp.random.seed(72)\r\ntf.set_random_seed(72)\r\n```\r\n\r\nNaturally, either I'm setting them wrong or I'm missing a RNG. I call Param.randomize() and Model.optimize(), and they seem to be the only sources of randomness at the moment (and I'm not even sure about the latter) so one of them is likely the culprit. Is there any way to either set the seed appropriately without messing with the source code or maybe adding an option allowing seeds to be set in the future?\n", "before_files": [{"content": "# Copyright 2017 Artem Artemev @awav\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint: disable=no-self-use\n# pylint: disable=too-few-public-methods\n\nimport abc\n\n\nclass Optimizer:\n @abc.abstractmethod\n def make_optimize_tensor(self, model, session=None, var_list=None, **kwargs):\n \"\"\"\n Make optimization tensor.\n The `make_optimize_tensor` method builds optimization tensor and initializes\n all necessary variables created by optimizer.\n\n :param model: GPflow model.\n :param session: Tensorflow session.\n :param var_list: List of variables for training.\n :param kwargs: Dictionary of extra parameters necessary for building\n optimizer tensor.\n :return: Tensorflow optimization tensor or operation.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def minimize(self, model, session=None, var_list=None, feed_dict=None,\n maxiter=1000, initialize=True, anchor=True, **kwargs):\n raise NotImplementedError()\n\n @staticmethod\n def _gen_var_list(model, var_list):\n var_list = var_list or []\n return list(set(model.trainable_tensors).union(var_list))\n\n @staticmethod\n def _gen_feed_dict(model, feed_dict):\n feed_dict = feed_dict or {}\n model_feeds = {} if model.feeds is None else model.feeds\n feed_dict.update(model_feeds)\n if feed_dict == {}:\n return None\n return feed_dict\n", "path": "gpflow/training/optimizer.py"}], "after_files": [{"content": "# Copyright 2017 Artem Artemev @awav\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint: disable=no-self-use\n# pylint: disable=too-few-public-methods\n\nimport abc\n\n\nclass Optimizer:\n @abc.abstractmethod\n def make_optimize_tensor(self, model, session=None, var_list=None, **kwargs):\n \"\"\"\n Make optimization tensor.\n The `make_optimize_tensor` method builds optimization tensor and initializes\n all necessary variables created by optimizer.\n\n :param model: GPflow model.\n :param session: Tensorflow session.\n :param var_list: List of variables for training.\n :param kwargs: Dictionary of extra parameters necessary for building\n optimizer tensor.\n :return: Tensorflow optimization tensor or operation.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def minimize(self, model, session=None, var_list=None, feed_dict=None,\n maxiter=1000, initialize=True, anchor=True, **kwargs):\n raise NotImplementedError()\n\n @staticmethod\n def _gen_var_list(model, var_list):\n var_list = var_list or []\n all_vars = list(set(model.trainable_tensors).union(var_list))\n return sorted(all_vars, key=lambda x: x.name)\n\n @staticmethod\n def _gen_feed_dict(model, feed_dict):\n feed_dict = feed_dict or {}\n model_feeds = {} if model.feeds is None else model.feeds\n feed_dict.update(model_feeds)\n if feed_dict == {}:\n return None\n return feed_dict\n", "path": "gpflow/training/optimizer.py"}]}
943
139
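The one-line GPflow patch works because `list(set(...))` orders variables by hash, and identity-hashed objects hash by memory address, which varies from process to process — so the optimizer could traverse variables in a different order even with fixed NumPy/TensorFlow seeds. Sorting by name pins the order. A pure-Python illustration, with a hypothetical `FakeVar` standing in for a TensorFlow variable:

```python
class FakeVar:
    # Hypothetical stand-in for a TF variable: identity-hashed, named.
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return self.name

trainables = [FakeVar("kern/lengthscales"), FakeVar("kern/variance")]
extra = [FakeVar("likelihood/variance")]

unordered = list(set(trainables).union(extra))  # hash order: varies per run
deterministic = sorted(set(trainables).union(extra), key=lambda v: v.name)

print("may differ between runs:", unordered)
print("always the same order:  ", deterministic)
assert [v.name for v in deterministic] == [
    "kern/lengthscales", "kern/variance", "likelihood/variance"]
```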
gh_patches_debug_3024
rasdani/github-patches
git_diff
MongoEngine__mongoengine-1454
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Rename modifier missing from update Not sure if this is intentional or not but it would be useful to have the `$rename` operator (or "modifier" for the update method for QuerySet and Document) available. I'm currently working around it with `exec_js`, like so: ``` python Document.objects.exec_js(""" function() { db[collection].update({}, {$rename: {foo: 'bar'}}); }""") ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mongoengine/base/common.py` Content: ``` 1 from mongoengine.errors import NotRegistered 2 3 __all__ = ('UPDATE_OPERATORS', 'get_document', '_document_registry') 4 5 6 UPDATE_OPERATORS = set(['set', 'unset', 'inc', 'dec', 'pop', 'push', 7 'push_all', 'pull', 'pull_all', 'add_to_set', 8 'set_on_insert', 'min', 'max']) 9 10 11 _document_registry = {} 12 13 14 def get_document(name): 15 """Get a document class by name.""" 16 doc = _document_registry.get(name, None) 17 if not doc: 18 # Possible old style name 19 single_end = name.split('.')[-1] 20 compound_end = '.%s' % single_end 21 possible_match = [k for k in _document_registry.keys() 22 if k.endswith(compound_end) or k == single_end] 23 if len(possible_match) == 1: 24 doc = _document_registry.get(possible_match.pop(), None) 25 if not doc: 26 raise NotRegistered(""" 27 `%s` has not been registered in the document registry. 28 Importing the document class automatically registers it, has it 29 been imported? 30 """.strip() % name) 31 return doc 32 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mongoengine/base/common.py b/mongoengine/base/common.py --- a/mongoengine/base/common.py +++ b/mongoengine/base/common.py @@ -5,7 +5,7 @@ UPDATE_OPERATORS = set(['set', 'unset', 'inc', 'dec', 'pop', 'push', 'push_all', 'pull', 'pull_all', 'add_to_set', - 'set_on_insert', 'min', 'max']) + 'set_on_insert', 'min', 'max', 'rename']) _document_registry = {}
{"golden_diff": "diff --git a/mongoengine/base/common.py b/mongoengine/base/common.py\n--- a/mongoengine/base/common.py\n+++ b/mongoengine/base/common.py\n@@ -5,7 +5,7 @@\n \n UPDATE_OPERATORS = set(['set', 'unset', 'inc', 'dec', 'pop', 'push',\n 'push_all', 'pull', 'pull_all', 'add_to_set',\n- 'set_on_insert', 'min', 'max'])\n+ 'set_on_insert', 'min', 'max', 'rename'])\n \n \n _document_registry = {}\n", "issue": "Rename modifier missing from update\nNot sure if this is intentional or not but it would be useful to have the `$rename` operator (or \"modifier\" for the update method for QuerySet and Document) available.\n\nI'm currently working around it with `exec_js`, like so:\n\n``` python\nDocument.objects.exec_js(\"\"\"\nfunction() {\n db[collection].update({}, {$rename: {foo: 'bar'}});\n}\"\"\")\n```\n\n", "before_files": [{"content": "from mongoengine.errors import NotRegistered\n\n__all__ = ('UPDATE_OPERATORS', 'get_document', '_document_registry')\n\n\nUPDATE_OPERATORS = set(['set', 'unset', 'inc', 'dec', 'pop', 'push',\n 'push_all', 'pull', 'pull_all', 'add_to_set',\n 'set_on_insert', 'min', 'max'])\n\n\n_document_registry = {}\n\n\ndef get_document(name):\n \"\"\"Get a document class by name.\"\"\"\n doc = _document_registry.get(name, None)\n if not doc:\n # Possible old style name\n single_end = name.split('.')[-1]\n compound_end = '.%s' % single_end\n possible_match = [k for k in _document_registry.keys()\n if k.endswith(compound_end) or k == single_end]\n if len(possible_match) == 1:\n doc = _document_registry.get(possible_match.pop(), None)\n if not doc:\n raise NotRegistered(\"\"\"\n `%s` has not been registered in the document registry.\n Importing the document class automatically registers it, has it\n been imported?\n \"\"\".strip() % name)\n return doc\n", "path": "mongoengine/base/common.py"}], "after_files": [{"content": "from mongoengine.errors import NotRegistered\n\n__all__ = ('UPDATE_OPERATORS', 'get_document', '_document_registry')\n\n\nUPDATE_OPERATORS = set(['set', 'unset', 'inc', 'dec', 'pop', 'push',\n 'push_all', 'pull', 'pull_all', 'add_to_set',\n 'set_on_insert', 'min', 'max', 'rename'])\n\n\n_document_registry = {}\n\n\ndef get_document(name):\n \"\"\"Get a document class by name.\"\"\"\n doc = _document_registry.get(name, None)\n if not doc:\n # Possible old style name\n single_end = name.split('.')[-1]\n compound_end = '.%s' % single_end\n possible_match = [k for k in _document_registry.keys()\n if k.endswith(compound_end) or k == single_end]\n if len(possible_match) == 1:\n doc = _document_registry.get(possible_match.pop(), None)\n if not doc:\n raise NotRegistered(\"\"\"\n `%s` has not been registered in the document registry.\n Importing the document class automatically registers it, has it\n been imported?\n \"\"\".strip() % name)\n return doc\n", "path": "mongoengine/base/common.py"}]}
655
122
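With `"rename"` whitelisted in `UPDATE_OPERATORS`, a query-time keyword such as `rename__foo="bar"` can map onto MongoDB's `$rename` modifier, replacing the `exec_js` workaround from the issue. A simplified sketch of that keyword-to-modifier translation — illustrative only, not MongoEngine's actual transform code (the real one also camel-cases operators such as `add_to_set` into `$addToSet`):

```python
UPDATE_OPERATORS = {"set", "unset", "inc", "dec", "pop", "push",
                    "push_all", "pull", "pull_all", "add_to_set",
                    "set_on_insert", "min", "max", "rename"}

def build_update(**kwargs):
    # Translate op__field=value keywords into a MongoDB update document.
    update = {}
    for key, value in kwargs.items():
        op, _, field = key.partition("__")
        if op not in UPDATE_OPERATORS or not field:
            raise ValueError(f"unsupported update keyword: {key}")
        update.setdefault("$" + op, {})[field] = value
    return update

assert build_update(rename__foo="bar") == {"$rename": {"foo": "bar"}}
assert build_update(set__count=3) == {"$set": {"count": 3}}
print(build_update(rename__foo="bar", unset__legacy=1))
```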
gh_patches_debug_11130
rasdani/github-patches
git_diff
elastic__apm-agent-python-1423
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Breaking change in sync httpx refactor from #1403 elastic/apm-agent-python#1403 refactored a lot of the httpx code. However it seems that the sync version of the instrumentation still calls `self._set_disttracing_headers` which was removed in the same PR, causing any form of sync httpx instrumentation to crash. **Environment (please complete the following information)** - Agent version: 6.7.1 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `elasticapm/instrumentation/packages/httpx/sync/httpcore.py` Content: ``` 1 # BSD 3-Clause License 2 # 3 # Copyright (c) 2021, Elasticsearch BV 4 # All rights reserved. 5 # 6 # Redistribution and use in source and binary forms, with or without 7 # modification, are permitted provided that the following conditions are met: 8 # 9 # * Redistributions of source code must retain the above copyright notice, this 10 # list of conditions and the following disclaimer. 11 # 12 # * Redistributions in binary form must reproduce the above copyright notice, 13 # this list of conditions and the following disclaimer in the documentation 14 # and/or other materials provided with the distribution. 15 # 16 # * Neither the name of the copyright holder nor the names of its 17 # contributors may be used to endorse or promote products derived from 18 # this software without specific prior written permission. 19 # 20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 31 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule 32 from elasticapm.instrumentation.packages.httpx import utils 33 from elasticapm.traces import DroppedSpan, capture_span, execution_context 34 from elasticapm.utils import default_ports 35 from elasticapm.utils.disttracing import TracingOptions 36 37 38 class HTTPCoreInstrumentation(AbstractInstrumentedModule): 39 name = "httpcore" 40 41 instrument_list = [ 42 ("httpcore._sync.connection", "SyncHTTPConnection.request"), # < httpcore 0.13 43 ("httpcore._sync.connection", "SyncHTTPConnection.handle_request"), # >= httpcore 0.13 44 ("httpcore._sync.connection", "HTTPConnection.handle_request"), # httpcore >= 0.14 (hopefully...) 
45 ] 46 47 def call(self, module, method, wrapped, instance, args, kwargs): 48 url, method, headers = utils.get_request_data(args, kwargs) 49 scheme, host, port, target = url 50 if port != default_ports.get(scheme): 51 host += ":" + str(port) 52 53 signature = "%s %s" % (method.upper(), host) 54 55 url = "%s://%s%s" % (scheme, host, target) 56 57 transaction = execution_context.get_transaction() 58 59 with capture_span( 60 signature, 61 span_type="external", 62 span_subtype="http", 63 extra={"http": {"url": url}}, 64 leaf=True, 65 ) as span: 66 # if httpcore has been called in a leaf span, this span might be a DroppedSpan. 67 leaf_span = span 68 while isinstance(leaf_span, DroppedSpan): 69 leaf_span = leaf_span.parent 70 71 if headers is not None: 72 # It's possible that there are only dropped spans, e.g. if we started dropping spans. 73 # In this case, the transaction.id is used 74 parent_id = leaf_span.id if leaf_span else transaction.id 75 trace_parent = transaction.trace_parent.copy_from( 76 span_id=parent_id, trace_options=TracingOptions(recorded=True) 77 ) 78 utils.set_disttracing_headers(headers, trace_parent, transaction) 79 if leaf_span: 80 leaf_span.dist_tracing_propagated = True 81 response = wrapped(*args, **kwargs) 82 status_code = utils.get_status(response) 83 if status_code: 84 if span.context: 85 span.context["http"]["status_code"] = status_code 86 span.set_success() if status_code < 400 else span.set_failure() 87 return response 88 89 def mutate_unsampled_call_args(self, module, method, wrapped, instance, args, kwargs, transaction): 90 # since we don't have a span, we set the span id to the transaction id 91 trace_parent = transaction.trace_parent.copy_from( 92 span_id=transaction.id, trace_options=TracingOptions(recorded=False) 93 ) 94 if "headers" in kwargs: 95 headers = kwargs["headers"] 96 if headers is None: 97 headers = [] 98 kwargs["headers"] = headers 99 self._set_disttracing_headers(headers, trace_parent, transaction) 100 return args, kwargs 101 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/elasticapm/instrumentation/packages/httpx/sync/httpcore.py b/elasticapm/instrumentation/packages/httpx/sync/httpcore.py --- a/elasticapm/instrumentation/packages/httpx/sync/httpcore.py +++ b/elasticapm/instrumentation/packages/httpx/sync/httpcore.py @@ -91,10 +91,6 @@ trace_parent = transaction.trace_parent.copy_from( span_id=transaction.id, trace_options=TracingOptions(recorded=False) ) - if "headers" in kwargs: - headers = kwargs["headers"] - if headers is None: - headers = [] - kwargs["headers"] = headers - self._set_disttracing_headers(headers, trace_parent, transaction) + headers = utils.get_request_data(args, kwargs)[2] + utils.set_disttracing_headers(headers, trace_parent, transaction) return args, kwargs
{"golden_diff": "diff --git a/elasticapm/instrumentation/packages/httpx/sync/httpcore.py b/elasticapm/instrumentation/packages/httpx/sync/httpcore.py\n--- a/elasticapm/instrumentation/packages/httpx/sync/httpcore.py\n+++ b/elasticapm/instrumentation/packages/httpx/sync/httpcore.py\n@@ -91,10 +91,6 @@\n trace_parent = transaction.trace_parent.copy_from(\n span_id=transaction.id, trace_options=TracingOptions(recorded=False)\n )\n- if \"headers\" in kwargs:\n- headers = kwargs[\"headers\"]\n- if headers is None:\n- headers = []\n- kwargs[\"headers\"] = headers\n- self._set_disttracing_headers(headers, trace_parent, transaction)\n+ headers = utils.get_request_data(args, kwargs)[2]\n+ utils.set_disttracing_headers(headers, trace_parent, transaction)\n return args, kwargs\n", "issue": "Breaking change in sync httpx refactor from #1403\nelastic/apm-agent-python#1403 refactored a lot of the httpx code. However it seems that the sync version of the instrumentation still calls `self._set_disttracing_headers` which was removed in the same PR, causing any form of sync httpx instrumentation to crash.\r\n\r\n**Environment (please complete the following information)**\r\n- Agent version: 6.7.1\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2021, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.instrumentation.packages.httpx import utils\nfrom elasticapm.traces import DroppedSpan, capture_span, execution_context\nfrom elasticapm.utils import default_ports\nfrom elasticapm.utils.disttracing import TracingOptions\n\n\nclass HTTPCoreInstrumentation(AbstractInstrumentedModule):\n name = \"httpcore\"\n\n instrument_list = [\n (\"httpcore._sync.connection\", \"SyncHTTPConnection.request\"), # < httpcore 0.13\n (\"httpcore._sync.connection\", \"SyncHTTPConnection.handle_request\"), # >= httpcore 0.13\n (\"httpcore._sync.connection\", \"HTTPConnection.handle_request\"), # httpcore >= 0.14 (hopefully...)\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n url, method, headers = utils.get_request_data(args, kwargs)\n scheme, host, port, target = url\n if port != default_ports.get(scheme):\n host += \":\" + str(port)\n\n signature = \"%s %s\" % (method.upper(), host)\n\n url = \"%s://%s%s\" % (scheme, host, target)\n\n transaction = execution_context.get_transaction()\n\n with capture_span(\n signature,\n span_type=\"external\",\n span_subtype=\"http\",\n extra={\"http\": {\"url\": url}},\n leaf=True,\n ) as span:\n # if httpcore has been called in a leaf span, this span might be a DroppedSpan.\n leaf_span = span\n while isinstance(leaf_span, DroppedSpan):\n leaf_span = leaf_span.parent\n\n if headers is not None:\n # It's possible that there are only dropped spans, e.g. 
if we started dropping spans.\n # In this case, the transaction.id is used\n parent_id = leaf_span.id if leaf_span else transaction.id\n trace_parent = transaction.trace_parent.copy_from(\n span_id=parent_id, trace_options=TracingOptions(recorded=True)\n )\n utils.set_disttracing_headers(headers, trace_parent, transaction)\n if leaf_span:\n leaf_span.dist_tracing_propagated = True\n response = wrapped(*args, **kwargs)\n status_code = utils.get_status(response)\n if status_code:\n if span.context:\n span.context[\"http\"][\"status_code\"] = status_code\n span.set_success() if status_code < 400 else span.set_failure()\n return response\n\n def mutate_unsampled_call_args(self, module, method, wrapped, instance, args, kwargs, transaction):\n # since we don't have a span, we set the span id to the transaction id\n trace_parent = transaction.trace_parent.copy_from(\n span_id=transaction.id, trace_options=TracingOptions(recorded=False)\n )\n if \"headers\" in kwargs:\n headers = kwargs[\"headers\"]\n if headers is None:\n headers = []\n kwargs[\"headers\"] = headers\n self._set_disttracing_headers(headers, trace_parent, transaction)\n return args, kwargs\n", "path": "elasticapm/instrumentation/packages/httpx/sync/httpcore.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2021, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.instrumentation.packages.httpx import utils\nfrom elasticapm.traces import DroppedSpan, capture_span, execution_context\nfrom elasticapm.utils import default_ports\nfrom elasticapm.utils.disttracing import TracingOptions\n\n\nclass HTTPCoreInstrumentation(AbstractInstrumentedModule):\n name = \"httpcore\"\n\n instrument_list = [\n (\"httpcore._sync.connection\", \"SyncHTTPConnection.request\"), # < httpcore 0.13\n (\"httpcore._sync.connection\", \"SyncHTTPConnection.handle_request\"), # >= httpcore 0.13\n (\"httpcore._sync.connection\", \"HTTPConnection.handle_request\"), # httpcore >= 0.14 (hopefully...)\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n url, method, headers = utils.get_request_data(args, kwargs)\n scheme, host, port, target = url\n if port != default_ports.get(scheme):\n host += \":\" + str(port)\n\n signature = \"%s %s\" % (method.upper(), host)\n\n url = \"%s://%s%s\" % (scheme, host, target)\n\n transaction = execution_context.get_transaction()\n\n with capture_span(\n signature,\n span_type=\"external\",\n span_subtype=\"http\",\n extra={\"http\": {\"url\": url}},\n leaf=True,\n ) as span:\n # if httpcore has been called in a leaf span, this span might be a DroppedSpan.\n leaf_span = span\n while isinstance(leaf_span, DroppedSpan):\n leaf_span = leaf_span.parent\n\n if headers is not None:\n # It's possible that there are only dropped spans, e.g. if we started dropping spans.\n # In this case, the transaction.id is used\n parent_id = leaf_span.id if leaf_span else transaction.id\n trace_parent = transaction.trace_parent.copy_from(\n span_id=parent_id, trace_options=TracingOptions(recorded=True)\n )\n utils.set_disttracing_headers(headers, trace_parent, transaction)\n if leaf_span:\n leaf_span.dist_tracing_propagated = True\n response = wrapped(*args, **kwargs)\n status_code = utils.get_status(response)\n if status_code:\n if span.context:\n span.context[\"http\"][\"status_code\"] = status_code\n span.set_success() if status_code < 400 else span.set_failure()\n return response\n\n def mutate_unsampled_call_args(self, module, method, wrapped, instance, args, kwargs, transaction):\n # since we don't have a span, we set the span id to the transaction id\n trace_parent = transaction.trace_parent.copy_from(\n span_id=transaction.id, trace_options=TracingOptions(recorded=False)\n )\n headers = utils.get_request_data(args, kwargs)[2]\n utils.set_disttracing_headers(headers, trace_parent, transaction)\n return args, kwargs\n", "path": "elasticapm/instrumentation/packages/httpx/sync/httpcore.py"}]}
1,569
203
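The agent fix above replaces a call to the deleted private `_set_disttracing_headers` with the shared `utils.get_request_data`/`utils.set_disttracing_headers` pair, so sampled and unsampled requests extract and mutate headers through one code path. A reduced sketch of that shared-helper shape; the function names mirror the diff, but the bodies here are invented for illustration and are much simpler than the real httpx/httpcore handling:

```python
# Reduced stand-ins for elasticapm.instrumentation.packages.httpx.utils.
def get_request_data(args, kwargs):
    # Single extraction path used by both sampled and unsampled branches;
    # the real helper also handles positional httpcore call signatures.
    method = kwargs.get("method", args[0] if args else "GET")
    url = kwargs.get("url", args[1] if len(args) > 1 else None)
    headers = kwargs.setdefault("headers", [])
    return url, method, headers

def set_disttracing_headers(headers, trace_parent):
    headers.append((b"traceparent", trace_parent.encode()))

def mutate_unsampled_call_args(args, kwargs, trace_parent):
    # Mirrors the patched method: reuse the helpers instead of a private one.
    headers = get_request_data(args, kwargs)[2]
    set_disttracing_headers(headers, trace_parent)
    return args, kwargs

_, kw = mutate_unsampled_call_args(
    (), {"method": "GET", "url": "https://example.com"},
    "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-00")
print(kw["headers"])  # [(b'traceparent', b'00-...-00')]
```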
gh_patches_debug_28407
rasdani/github-patches
git_diff
pantsbuild__pants-16295
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Not able to load resources when using pants vs sbt **Describe the bug** When using sbt we are able to call `Thread.currentThread().getContextClassLoader().getResources` and get a list of URLs. When using pants the list is empty. This at the moment limits us from using Flyway with pants. **Pants version** 2.13.0a1 and main. **OS** MacOS **Additional info** Example repo to reproduce the issue: https://github.com/somdoron/test-pants-resources I think the issue is, that pants only compress files in the resources zip file and not the directories. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/python/pants/jvm/resources.py` Content: ``` 1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 import itertools 4 import logging 5 from itertools import chain 6 7 from pants.core.target_types import ResourcesFieldSet, ResourcesGeneratorFieldSet 8 from pants.core.util_rules import stripped_source_files 9 from pants.core.util_rules.source_files import SourceFilesRequest 10 from pants.core.util_rules.stripped_source_files import StrippedSourceFiles 11 from pants.core.util_rules.system_binaries import ZipBinary 12 from pants.engine.fs import Digest, MergeDigests 13 from pants.engine.internals.selectors import MultiGet 14 from pants.engine.process import Process, ProcessResult 15 from pants.engine.rules import Get, collect_rules, rule 16 from pants.engine.target import SourcesField 17 from pants.engine.unions import UnionRule 18 from pants.jvm import compile 19 from pants.jvm.compile import ( 20 ClasspathDependenciesRequest, 21 ClasspathEntry, 22 ClasspathEntryRequest, 23 ClasspathEntryRequests, 24 CompileResult, 25 FallibleClasspathEntries, 26 FallibleClasspathEntry, 27 ) 28 29 logger = logging.getLogger(__name__) 30 31 32 class JvmResourcesRequest(ClasspathEntryRequest): 33 field_sets = ( 34 ResourcesFieldSet, 35 ResourcesGeneratorFieldSet, 36 ) 37 38 39 @rule(desc="Assemble resources") 40 async def assemble_resources_jar( 41 zip: ZipBinary, 42 request: JvmResourcesRequest, 43 ) -> FallibleClasspathEntry: 44 # Request the component's direct dependency classpath, and additionally any prerequisite. 45 # Filter out any dependencies that are generated by our current target so that each resource 46 # only appears in a single input JAR. 47 # NOTE: Generated dependencies will have the same dependencies as the current target, so we 48 # don't need to inspect those dependencies. 
49 optional_prereq_request = [*((request.prerequisite,) if request.prerequisite else ())] 50 fallibles = await MultiGet( 51 Get(FallibleClasspathEntries, ClasspathEntryRequests(optional_prereq_request)), 52 Get(FallibleClasspathEntries, ClasspathDependenciesRequest(request, ignore_generated=True)), 53 ) 54 direct_dependency_classpath_entries = FallibleClasspathEntries( 55 itertools.chain(*fallibles) 56 ).if_all_succeeded() 57 58 if direct_dependency_classpath_entries is None: 59 return FallibleClasspathEntry( 60 description=str(request.component), 61 result=CompileResult.DEPENDENCY_FAILED, 62 output=None, 63 exit_code=1, 64 ) 65 66 source_files = await Get( 67 StrippedSourceFiles, 68 SourceFilesRequest([tgt.get(SourcesField) for tgt in request.component.members]), 69 ) 70 71 output_filename = f"{request.component.representative.address.path_safe_spec}.resources.jar" 72 output_files = [output_filename] 73 74 resources_jar_input_digest = source_files.snapshot.digest 75 resources_jar_result = await Get( 76 ProcessResult, 77 Process( 78 argv=[ 79 zip.path, 80 output_filename, 81 *source_files.snapshot.files, 82 ], 83 description="Build partial JAR containing resources files", 84 input_digest=resources_jar_input_digest, 85 output_files=output_files, 86 ), 87 ) 88 89 cpe = ClasspathEntry(resources_jar_result.output_digest, output_files, []) 90 91 merged_cpe_digest = await Get( 92 Digest, 93 MergeDigests(chain((cpe.digest,), (i.digest for i in direct_dependency_classpath_entries))), 94 ) 95 96 merged_cpe = ClasspathEntry.merge( 97 digest=merged_cpe_digest, entries=[cpe, *direct_dependency_classpath_entries] 98 ) 99 100 return FallibleClasspathEntry(output_filename, CompileResult.SUCCEEDED, merged_cpe, 0) 101 102 103 def rules(): 104 return [ 105 *collect_rules(), 106 *compile.rules(), 107 *stripped_source_files.rules(), 108 UnionRule(ClasspathEntryRequest, JvmResourcesRequest), 109 ] 110 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/python/pants/jvm/resources.py b/src/python/pants/jvm/resources.py --- a/src/python/pants/jvm/resources.py +++ b/src/python/pants/jvm/resources.py @@ -3,6 +3,7 @@ import itertools import logging from itertools import chain +from pathlib import Path from pants.core.target_types import ResourcesFieldSet, ResourcesGeneratorFieldSet from pants.core.util_rules import stripped_source_files @@ -71,6 +72,13 @@ output_filename = f"{request.component.representative.address.path_safe_spec}.resources.jar" output_files = [output_filename] + # #16231: Valid JAR files need the directories of each resource file as well as the files + # themselves. + + paths = {Path(filename) for filename in source_files.snapshot.files} + directories = {parent for path in paths for parent in path.parents} + input_files = {str(path) for path in chain(paths, directories)} + resources_jar_input_digest = source_files.snapshot.digest resources_jar_result = await Get( ProcessResult, @@ -78,7 +86,7 @@ argv=[ zip.path, output_filename, - *source_files.snapshot.files, + *sorted(input_files), ], description="Build partial JAR containing resources files", input_digest=resources_jar_input_digest,
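A note on this patch: a `URLClassLoader` can generally only resolve a directory resource (the lookup Flyway performs via `getResources`) from a JAR that carries an explicit zero-length entry for that directory, which is why a zip holding only file entries looks empty to it. The fix above therefore collects every parent directory of every resource file and passes them to the `zip` binary alongside the files. The sketch below reproduces the effect with Python's standard `zipfile` module instead of the `zip` CLI; the resource filenames are made up for illustration.

```python
import zipfile
from pathlib import Path

# Hypothetical resource files; in pants the real list comes from the
# stripped source snapshot.
files = ["db/migration/V1__init.sql", "com/example/app.conf"]

# Mirror the patch: every parent directory of every file becomes an entry too.
paths = {Path(f) for f in files}
directories = {p for path in paths for p in path.parents if str(p) != "."}

with zipfile.ZipFile("resources.jar", "w") as jar:
    for name in sorted(str(d) + "/" for d in directories):
        jar.writestr(name, "")  # zero-length entry ending in "/" == directory entry
    for f in sorted(files):
        jar.writestr(f, "stub contents")

with zipfile.ZipFile("resources.jar") as jar:
    print(jar.namelist())
    # ['com/', 'com/example/', 'db/', 'db/migration/',
    #  'com/example/app.conf', 'db/migration/V1__init.sql']
```

The real rule simply appends `sorted(input_files)` to the `zip` argv and lets the archiver store the directory entries itself.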
{"golden_diff": "diff --git a/src/python/pants/jvm/resources.py b/src/python/pants/jvm/resources.py\n--- a/src/python/pants/jvm/resources.py\n+++ b/src/python/pants/jvm/resources.py\n@@ -3,6 +3,7 @@\n import itertools\n import logging\n from itertools import chain\n+from pathlib import Path\n \n from pants.core.target_types import ResourcesFieldSet, ResourcesGeneratorFieldSet\n from pants.core.util_rules import stripped_source_files\n@@ -71,6 +72,13 @@\n output_filename = f\"{request.component.representative.address.path_safe_spec}.resources.jar\"\n output_files = [output_filename]\n \n+ # #16231: Valid JAR files need the directories of each resource file as well as the files\n+ # themselves.\n+\n+ paths = {Path(filename) for filename in source_files.snapshot.files}\n+ directories = {parent for path in paths for parent in path.parents}\n+ input_files = {str(path) for path in chain(paths, directories)}\n+\n resources_jar_input_digest = source_files.snapshot.digest\n resources_jar_result = await Get(\n ProcessResult,\n@@ -78,7 +86,7 @@\n argv=[\n zip.path,\n output_filename,\n- *source_files.snapshot.files,\n+ *sorted(input_files),\n ],\n description=\"Build partial JAR containing resources files\",\n input_digest=resources_jar_input_digest,\n", "issue": "Not able to load resources when using pants vs sbt\n**Describe the bug**\r\nWhen using sbt we are able to call `Thread.currentThread().getContextClassLoader().getResources` and get a list of URLs. When using pants the list is empty. \r\n\r\nThis at the moment limits us from using Flyway with pants.\r\n\r\n**Pants version**\r\n2.13.0a1 and main.\r\n\r\n**OS**\r\nMacOS\r\n\r\n**Additional info**\r\nExample repo to reproduce the issue:\r\nhttps://github.com/somdoron/test-pants-resources\r\n\r\nI think the issue is, that pants only compress files in the resources zip file and not the directories.\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nimport itertools\nimport logging\nfrom itertools import chain\n\nfrom pants.core.target_types import ResourcesFieldSet, ResourcesGeneratorFieldSet\nfrom pants.core.util_rules import stripped_source_files\nfrom pants.core.util_rules.source_files import SourceFilesRequest\nfrom pants.core.util_rules.stripped_source_files import StrippedSourceFiles\nfrom pants.core.util_rules.system_binaries import ZipBinary\nfrom pants.engine.fs import Digest, MergeDigests\nfrom pants.engine.internals.selectors import MultiGet\nfrom pants.engine.process import Process, ProcessResult\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.engine.target import SourcesField\nfrom pants.engine.unions import UnionRule\nfrom pants.jvm import compile\nfrom pants.jvm.compile import (\n ClasspathDependenciesRequest,\n ClasspathEntry,\n ClasspathEntryRequest,\n ClasspathEntryRequests,\n CompileResult,\n FallibleClasspathEntries,\n FallibleClasspathEntry,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass JvmResourcesRequest(ClasspathEntryRequest):\n field_sets = (\n ResourcesFieldSet,\n ResourcesGeneratorFieldSet,\n )\n\n\n@rule(desc=\"Assemble resources\")\nasync def assemble_resources_jar(\n zip: ZipBinary,\n request: JvmResourcesRequest,\n) -> FallibleClasspathEntry:\n # Request the component's direct dependency classpath, and additionally any prerequisite.\n # Filter out any dependencies that are generated by our current target so that each resource\n # only appears in a single input JAR.\n # 
NOTE: Generated dependencies will have the same dependencies as the current target, so we\n # don't need to inspect those dependencies.\n optional_prereq_request = [*((request.prerequisite,) if request.prerequisite else ())]\n fallibles = await MultiGet(\n Get(FallibleClasspathEntries, ClasspathEntryRequests(optional_prereq_request)),\n Get(FallibleClasspathEntries, ClasspathDependenciesRequest(request, ignore_generated=True)),\n )\n direct_dependency_classpath_entries = FallibleClasspathEntries(\n itertools.chain(*fallibles)\n ).if_all_succeeded()\n\n if direct_dependency_classpath_entries is None:\n return FallibleClasspathEntry(\n description=str(request.component),\n result=CompileResult.DEPENDENCY_FAILED,\n output=None,\n exit_code=1,\n )\n\n source_files = await Get(\n StrippedSourceFiles,\n SourceFilesRequest([tgt.get(SourcesField) for tgt in request.component.members]),\n )\n\n output_filename = f\"{request.component.representative.address.path_safe_spec}.resources.jar\"\n output_files = [output_filename]\n\n resources_jar_input_digest = source_files.snapshot.digest\n resources_jar_result = await Get(\n ProcessResult,\n Process(\n argv=[\n zip.path,\n output_filename,\n *source_files.snapshot.files,\n ],\n description=\"Build partial JAR containing resources files\",\n input_digest=resources_jar_input_digest,\n output_files=output_files,\n ),\n )\n\n cpe = ClasspathEntry(resources_jar_result.output_digest, output_files, [])\n\n merged_cpe_digest = await Get(\n Digest,\n MergeDigests(chain((cpe.digest,), (i.digest for i in direct_dependency_classpath_entries))),\n )\n\n merged_cpe = ClasspathEntry.merge(\n digest=merged_cpe_digest, entries=[cpe, *direct_dependency_classpath_entries]\n )\n\n return FallibleClasspathEntry(output_filename, CompileResult.SUCCEEDED, merged_cpe, 0)\n\n\ndef rules():\n return [\n *collect_rules(),\n *compile.rules(),\n *stripped_source_files.rules(),\n UnionRule(ClasspathEntryRequest, JvmResourcesRequest),\n ]\n", "path": "src/python/pants/jvm/resources.py"}], "after_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nimport itertools\nimport logging\nfrom itertools import chain\nfrom pathlib import Path\n\nfrom pants.core.target_types import ResourcesFieldSet, ResourcesGeneratorFieldSet\nfrom pants.core.util_rules import stripped_source_files\nfrom pants.core.util_rules.source_files import SourceFilesRequest\nfrom pants.core.util_rules.stripped_source_files import StrippedSourceFiles\nfrom pants.core.util_rules.system_binaries import ZipBinary\nfrom pants.engine.fs import Digest, MergeDigests\nfrom pants.engine.internals.selectors import MultiGet\nfrom pants.engine.process import Process, ProcessResult\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.engine.target import SourcesField\nfrom pants.engine.unions import UnionRule\nfrom pants.jvm import compile\nfrom pants.jvm.compile import (\n ClasspathDependenciesRequest,\n ClasspathEntry,\n ClasspathEntryRequest,\n ClasspathEntryRequests,\n CompileResult,\n FallibleClasspathEntries,\n FallibleClasspathEntry,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass JvmResourcesRequest(ClasspathEntryRequest):\n field_sets = (\n ResourcesFieldSet,\n ResourcesGeneratorFieldSet,\n )\n\n\n@rule(desc=\"Assemble resources\")\nasync def assemble_resources_jar(\n zip: ZipBinary,\n request: JvmResourcesRequest,\n) -> FallibleClasspathEntry:\n # Request the component's direct dependency classpath, 
and additionally any prerequisite.\n # Filter out any dependencies that are generated by our current target so that each resource\n # only appears in a single input JAR.\n # NOTE: Generated dependencies will have the same dependencies as the current target, so we\n # don't need to inspect those dependencies.\n optional_prereq_request = [*((request.prerequisite,) if request.prerequisite else ())]\n fallibles = await MultiGet(\n Get(FallibleClasspathEntries, ClasspathEntryRequests(optional_prereq_request)),\n Get(FallibleClasspathEntries, ClasspathDependenciesRequest(request, ignore_generated=True)),\n )\n direct_dependency_classpath_entries = FallibleClasspathEntries(\n itertools.chain(*fallibles)\n ).if_all_succeeded()\n\n if direct_dependency_classpath_entries is None:\n return FallibleClasspathEntry(\n description=str(request.component),\n result=CompileResult.DEPENDENCY_FAILED,\n output=None,\n exit_code=1,\n )\n\n source_files = await Get(\n StrippedSourceFiles,\n SourceFilesRequest([tgt.get(SourcesField) for tgt in request.component.members]),\n )\n\n output_filename = f\"{request.component.representative.address.path_safe_spec}.resources.jar\"\n output_files = [output_filename]\n\n # #16231: Valid JAR files need the directories of each resource file as well as the files\n # themselves.\n\n paths = {Path(filename) for filename in source_files.snapshot.files}\n directories = {parent for path in paths for parent in path.parents}\n input_files = {str(path) for path in chain(paths, directories)}\n\n resources_jar_input_digest = source_files.snapshot.digest\n resources_jar_result = await Get(\n ProcessResult,\n Process(\n argv=[\n zip.path,\n output_filename,\n *sorted(input_files),\n ],\n description=\"Build partial JAR containing resources files\",\n input_digest=resources_jar_input_digest,\n output_files=output_files,\n ),\n )\n\n cpe = ClasspathEntry(resources_jar_result.output_digest, output_files, [])\n\n merged_cpe_digest = await Get(\n Digest,\n MergeDigests(chain((cpe.digest,), (i.digest for i in direct_dependency_classpath_entries))),\n )\n\n merged_cpe = ClasspathEntry.merge(\n digest=merged_cpe_digest, entries=[cpe, *direct_dependency_classpath_entries]\n )\n\n return FallibleClasspathEntry(output_filename, CompileResult.SUCCEEDED, merged_cpe, 0)\n\n\ndef rules():\n return [\n *collect_rules(),\n *compile.rules(),\n *stripped_source_files.rules(),\n UnionRule(ClasspathEntryRequest, JvmResourcesRequest),\n ]\n", "path": "src/python/pants/jvm/resources.py"}]}
num_tokens: 1,430
num_tokens_diff: 306
problem_id: gh_patches_debug_28333
source: rasdani/github-patches
task_type: git_diff
in_source_id: pypa__pip-2766
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Version self check should not warn for post releases Post releases are explicitly designed to just fix small errors that won't affect the code itself, things like doc updates. However if we release a post release then the pip version self check will tell everyone to go download it, even though using it isn't really all that important. Ideally this should just ignore post releases. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pip/utils/outdated.py` Content: ``` 1 from __future__ import absolute_import 2 3 import datetime 4 import json 5 import logging 6 import os.path 7 import sys 8 9 from pip._vendor import lockfile 10 from pip._vendor import pkg_resources 11 12 from pip.compat import total_seconds 13 from pip.index import PyPI 14 from pip.locations import USER_CACHE_DIR, running_under_virtualenv 15 from pip.utils import ensure_dir 16 from pip.utils.filesystem import check_path_owner 17 18 19 SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ" 20 21 22 logger = logging.getLogger(__name__) 23 24 25 class VirtualenvSelfCheckState(object): 26 def __init__(self): 27 self.statefile_path = os.path.join(sys.prefix, "pip-selfcheck.json") 28 29 # Load the existing state 30 try: 31 with open(self.statefile_path) as statefile: 32 self.state = json.load(statefile) 33 except (IOError, ValueError): 34 self.state = {} 35 36 def save(self, pypi_version, current_time): 37 # Attempt to write out our version check file 38 with open(self.statefile_path, "w") as statefile: 39 json.dump( 40 { 41 "last_check": current_time.strftime(SELFCHECK_DATE_FMT), 42 "pypi_version": pypi_version, 43 }, 44 statefile, 45 sort_keys=True, 46 separators=(",", ":") 47 ) 48 49 50 class GlobalSelfCheckState(object): 51 def __init__(self): 52 self.statefile_path = os.path.join(USER_CACHE_DIR, "selfcheck.json") 53 54 # Load the existing state 55 try: 56 with open(self.statefile_path) as statefile: 57 self.state = json.load(statefile)[sys.prefix] 58 except (IOError, ValueError, KeyError): 59 self.state = {} 60 61 def save(self, pypi_version, current_time): 62 # Check to make sure that we own the directory 63 if not check_path_owner(os.path.dirname(self.statefile_path)): 64 return 65 66 # Now that we've ensured the directory is owned by this user, we'll go 67 # ahead and make sure that all our directories are created. 68 ensure_dir(os.path.dirname(self.statefile_path)) 69 70 # Attempt to write out our version check file 71 with lockfile.LockFile(self.statefile_path): 72 if os.path.exists(self.statefile_path): 73 with open(self.statefile_path) as statefile: 74 state = json.load(statefile) 75 else: 76 state = {} 77 78 state[sys.prefix] = { 79 "last_check": current_time.strftime(SELFCHECK_DATE_FMT), 80 "pypi_version": pypi_version, 81 } 82 83 with open(self.statefile_path, "w") as statefile: 84 json.dump(state, statefile, sort_keys=True, 85 separators=(",", ":")) 86 87 88 def load_selfcheck_statefile(): 89 if running_under_virtualenv(): 90 return VirtualenvSelfCheckState() 91 else: 92 return GlobalSelfCheckState() 93 94 95 def pip_version_check(session): 96 """Check for an update for pip. 97 98 Limit the frequency of checks to once per week. State is stored either in 99 the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix 100 of the pip script path. 
101 """ 102 import pip # imported here to prevent circular imports 103 pypi_version = None 104 105 try: 106 state = load_selfcheck_statefile() 107 108 current_time = datetime.datetime.utcnow() 109 # Determine if we need to refresh the state 110 if "last_check" in state.state and "pypi_version" in state.state: 111 last_check = datetime.datetime.strptime( 112 state.state["last_check"], 113 SELFCHECK_DATE_FMT 114 ) 115 if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60: 116 pypi_version = state.state["pypi_version"] 117 118 # Refresh the version if we need to or just see if we need to warn 119 if pypi_version is None: 120 resp = session.get( 121 PyPI.pip_json_url, 122 headers={"Accept": "application/json"}, 123 ) 124 resp.raise_for_status() 125 pypi_version = resp.json()["info"]["version"] 126 127 # save that we've performed a check 128 state.save(pypi_version, current_time) 129 130 pip_version = pkg_resources.parse_version(pip.__version__) 131 132 # Determine if our pypi_version is older 133 if pip_version < pkg_resources.parse_version(pypi_version): 134 logger.warning( 135 "You are using pip version %s, however version %s is " 136 "available.\nYou should consider upgrading via the " 137 "'pip install --upgrade pip' command." % (pip.__version__, 138 pypi_version) 139 ) 140 141 except Exception: 142 logger.debug( 143 "There was an error checking the latest version of pip", 144 exc_info=True, 145 ) 146 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pip/utils/outdated.py b/pip/utils/outdated.py --- a/pip/utils/outdated.py +++ b/pip/utils/outdated.py @@ -7,7 +7,7 @@ import sys from pip._vendor import lockfile -from pip._vendor import pkg_resources +from pip._vendor.packaging import version as packaging_version from pip.compat import total_seconds from pip.index import PyPI @@ -122,15 +122,23 @@ headers={"Accept": "application/json"}, ) resp.raise_for_status() - pypi_version = resp.json()["info"]["version"] + pypi_version = [ + v for v in sorted( + list(resp.json()["releases"]), + key=packaging_version.parse, + ) + if not packaging_version.parse(v).is_prerelease + ][-1] # save that we've performed a check state.save(pypi_version, current_time) - pip_version = pkg_resources.parse_version(pip.__version__) + pip_version = packaging_version.parse(pip.__version__) + remote_version = packaging_version.parse(pypi_version) # Determine if our pypi_version is older - if pip_version < pkg_resources.parse_version(pypi_version): + if (pip_version < remote_version and + pip_version.base_version != remote_version.base_version): logger.warning( "You are using pip version %s, however version %s is " "available.\nYou should consider upgrading via the "
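The diff makes two behavioral changes worth separating: the latest version is now picked from the full `releases` map with pre-releases filtered out via `is_prerelease`, and the warning is suppressed whenever the remote version differs from the installed one only in its post-release segment, which is what the `base_version` comparison checks. The snippet below demonstrates both rules with the standalone `packaging` distribution (the patch uses pip's vendored copy, but the `Version` API is the same):

```python
from packaging import version

installed = version.parse("7.0.0")

for candidate in ("7.0.0.post1", "7.0.1", "7.1.0b1"):
    remote = version.parse(candidate)
    if remote.is_prerelease:
        print(candidate, "-> filtered out of the release list")
        continue
    if installed < remote and installed.base_version != remote.base_version:
        print(candidate, "-> would trigger the upgrade warning")
    else:
        print(candidate, "-> silent: only the post segment differs")
```

Note that post releases are *not* pre-releases under PEP 440 (`is_prerelease` is `False` for `7.0.0.post1`), so the filter alone would not fix the issue; the `base_version` check is what actually silences them.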
{"golden_diff": "diff --git a/pip/utils/outdated.py b/pip/utils/outdated.py\n--- a/pip/utils/outdated.py\n+++ b/pip/utils/outdated.py\n@@ -7,7 +7,7 @@\n import sys\n \n from pip._vendor import lockfile\n-from pip._vendor import pkg_resources\n+from pip._vendor.packaging import version as packaging_version\n \n from pip.compat import total_seconds\n from pip.index import PyPI\n@@ -122,15 +122,23 @@\n headers={\"Accept\": \"application/json\"},\n )\n resp.raise_for_status()\n- pypi_version = resp.json()[\"info\"][\"version\"]\n+ pypi_version = [\n+ v for v in sorted(\n+ list(resp.json()[\"releases\"]),\n+ key=packaging_version.parse,\n+ )\n+ if not packaging_version.parse(v).is_prerelease\n+ ][-1]\n \n # save that we've performed a check\n state.save(pypi_version, current_time)\n \n- pip_version = pkg_resources.parse_version(pip.__version__)\n+ pip_version = packaging_version.parse(pip.__version__)\n+ remote_version = packaging_version.parse(pypi_version)\n \n # Determine if our pypi_version is older\n- if pip_version < pkg_resources.parse_version(pypi_version):\n+ if (pip_version < remote_version and\n+ pip_version.base_version != remote_version.base_version):\n logger.warning(\n \"You are using pip version %s, however version %s is \"\n \"available.\\nYou should consider upgrading via the \"\n", "issue": "Version self check should not warn for post releases\nPost releases are explicitly designed to just fix small errors that won't affect the code itself, things like doc updates. However if we release a post release then the pip version self check will tell everyone to go download it, even though using it isn't really all that important.\n\nIdeally this should just ignore post releases.\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport datetime\nimport json\nimport logging\nimport os.path\nimport sys\n\nfrom pip._vendor import lockfile\nfrom pip._vendor import pkg_resources\n\nfrom pip.compat import total_seconds\nfrom pip.index import PyPI\nfrom pip.locations import USER_CACHE_DIR, running_under_virtualenv\nfrom pip.utils import ensure_dir\nfrom pip.utils.filesystem import check_path_owner\n\n\nSELFCHECK_DATE_FMT = \"%Y-%m-%dT%H:%M:%SZ\"\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass VirtualenvSelfCheckState(object):\n def __init__(self):\n self.statefile_path = os.path.join(sys.prefix, \"pip-selfcheck.json\")\n\n # Load the existing state\n try:\n with open(self.statefile_path) as statefile:\n self.state = json.load(statefile)\n except (IOError, ValueError):\n self.state = {}\n\n def save(self, pypi_version, current_time):\n # Attempt to write out our version check file\n with open(self.statefile_path, \"w\") as statefile:\n json.dump(\n {\n \"last_check\": current_time.strftime(SELFCHECK_DATE_FMT),\n \"pypi_version\": pypi_version,\n },\n statefile,\n sort_keys=True,\n separators=(\",\", \":\")\n )\n\n\nclass GlobalSelfCheckState(object):\n def __init__(self):\n self.statefile_path = os.path.join(USER_CACHE_DIR, \"selfcheck.json\")\n\n # Load the existing state\n try:\n with open(self.statefile_path) as statefile:\n self.state = json.load(statefile)[sys.prefix]\n except (IOError, ValueError, KeyError):\n self.state = {}\n\n def save(self, pypi_version, current_time):\n # Check to make sure that we own the directory\n if not check_path_owner(os.path.dirname(self.statefile_path)):\n return\n\n # Now that we've ensured the directory is owned by this user, we'll go\n # ahead and make sure that all our directories are created.\n 
ensure_dir(os.path.dirname(self.statefile_path))\n\n # Attempt to write out our version check file\n with lockfile.LockFile(self.statefile_path):\n if os.path.exists(self.statefile_path):\n with open(self.statefile_path) as statefile:\n state = json.load(statefile)\n else:\n state = {}\n\n state[sys.prefix] = {\n \"last_check\": current_time.strftime(SELFCHECK_DATE_FMT),\n \"pypi_version\": pypi_version,\n }\n\n with open(self.statefile_path, \"w\") as statefile:\n json.dump(state, statefile, sort_keys=True,\n separators=(\",\", \":\"))\n\n\ndef load_selfcheck_statefile():\n if running_under_virtualenv():\n return VirtualenvSelfCheckState()\n else:\n return GlobalSelfCheckState()\n\n\ndef pip_version_check(session):\n \"\"\"Check for an update for pip.\n\n Limit the frequency of checks to once per week. State is stored either in\n the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix\n of the pip script path.\n \"\"\"\n import pip # imported here to prevent circular imports\n pypi_version = None\n\n try:\n state = load_selfcheck_statefile()\n\n current_time = datetime.datetime.utcnow()\n # Determine if we need to refresh the state\n if \"last_check\" in state.state and \"pypi_version\" in state.state:\n last_check = datetime.datetime.strptime(\n state.state[\"last_check\"],\n SELFCHECK_DATE_FMT\n )\n if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60:\n pypi_version = state.state[\"pypi_version\"]\n\n # Refresh the version if we need to or just see if we need to warn\n if pypi_version is None:\n resp = session.get(\n PyPI.pip_json_url,\n headers={\"Accept\": \"application/json\"},\n )\n resp.raise_for_status()\n pypi_version = resp.json()[\"info\"][\"version\"]\n\n # save that we've performed a check\n state.save(pypi_version, current_time)\n\n pip_version = pkg_resources.parse_version(pip.__version__)\n\n # Determine if our pypi_version is older\n if pip_version < pkg_resources.parse_version(pypi_version):\n logger.warning(\n \"You are using pip version %s, however version %s is \"\n \"available.\\nYou should consider upgrading via the \"\n \"'pip install --upgrade pip' command.\" % (pip.__version__,\n pypi_version)\n )\n\n except Exception:\n logger.debug(\n \"There was an error checking the latest version of pip\",\n exc_info=True,\n )\n", "path": "pip/utils/outdated.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport datetime\nimport json\nimport logging\nimport os.path\nimport sys\n\nfrom pip._vendor import lockfile\nfrom pip._vendor.packaging import version as packaging_version\n\nfrom pip.compat import total_seconds\nfrom pip.index import PyPI\nfrom pip.locations import USER_CACHE_DIR, running_under_virtualenv\nfrom pip.utils import ensure_dir\nfrom pip.utils.filesystem import check_path_owner\n\n\nSELFCHECK_DATE_FMT = \"%Y-%m-%dT%H:%M:%SZ\"\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass VirtualenvSelfCheckState(object):\n def __init__(self):\n self.statefile_path = os.path.join(sys.prefix, \"pip-selfcheck.json\")\n\n # Load the existing state\n try:\n with open(self.statefile_path) as statefile:\n self.state = json.load(statefile)\n except (IOError, ValueError):\n self.state = {}\n\n def save(self, pypi_version, current_time):\n # Attempt to write out our version check file\n with open(self.statefile_path, \"w\") as statefile:\n json.dump(\n {\n \"last_check\": current_time.strftime(SELFCHECK_DATE_FMT),\n \"pypi_version\": pypi_version,\n },\n statefile,\n sort_keys=True,\n separators=(\",\", \":\")\n 
)\n\n\nclass GlobalSelfCheckState(object):\n def __init__(self):\n self.statefile_path = os.path.join(USER_CACHE_DIR, \"selfcheck.json\")\n\n # Load the existing state\n try:\n with open(self.statefile_path) as statefile:\n self.state = json.load(statefile)[sys.prefix]\n except (IOError, ValueError, KeyError):\n self.state = {}\n\n def save(self, pypi_version, current_time):\n # Check to make sure that we own the directory\n if not check_path_owner(os.path.dirname(self.statefile_path)):\n return\n\n # Now that we've ensured the directory is owned by this user, we'll go\n # ahead and make sure that all our directories are created.\n ensure_dir(os.path.dirname(self.statefile_path))\n\n # Attempt to write out our version check file\n with lockfile.LockFile(self.statefile_path):\n if os.path.exists(self.statefile_path):\n with open(self.statefile_path) as statefile:\n state = json.load(statefile)\n else:\n state = {}\n\n state[sys.prefix] = {\n \"last_check\": current_time.strftime(SELFCHECK_DATE_FMT),\n \"pypi_version\": pypi_version,\n }\n\n with open(self.statefile_path, \"w\") as statefile:\n json.dump(state, statefile, sort_keys=True,\n separators=(\",\", \":\"))\n\n\ndef load_selfcheck_statefile():\n if running_under_virtualenv():\n return VirtualenvSelfCheckState()\n else:\n return GlobalSelfCheckState()\n\n\ndef pip_version_check(session):\n \"\"\"Check for an update for pip.\n\n Limit the frequency of checks to once per week. State is stored either in\n the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix\n of the pip script path.\n \"\"\"\n import pip # imported here to prevent circular imports\n pypi_version = None\n\n try:\n state = load_selfcheck_statefile()\n\n current_time = datetime.datetime.utcnow()\n # Determine if we need to refresh the state\n if \"last_check\" in state.state and \"pypi_version\" in state.state:\n last_check = datetime.datetime.strptime(\n state.state[\"last_check\"],\n SELFCHECK_DATE_FMT\n )\n if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60:\n pypi_version = state.state[\"pypi_version\"]\n\n # Refresh the version if we need to or just see if we need to warn\n if pypi_version is None:\n resp = session.get(\n PyPI.pip_json_url,\n headers={\"Accept\": \"application/json\"},\n )\n resp.raise_for_status()\n pypi_version = [\n v for v in sorted(\n list(resp.json()[\"releases\"]),\n key=packaging_version.parse,\n )\n if not packaging_version.parse(v).is_prerelease\n ][-1]\n\n # save that we've performed a check\n state.save(pypi_version, current_time)\n\n pip_version = packaging_version.parse(pip.__version__)\n remote_version = packaging_version.parse(pypi_version)\n\n # Determine if our pypi_version is older\n if (pip_version < remote_version and\n pip_version.base_version != remote_version.base_version):\n logger.warning(\n \"You are using pip version %s, however version %s is \"\n \"available.\\nYou should consider upgrading via the \"\n \"'pip install --upgrade pip' command.\" % (pip.__version__,\n pypi_version)\n )\n\n except Exception:\n logger.debug(\n \"There was an error checking the latest version of pip\",\n exc_info=True,\n )\n", "path": "pip/utils/outdated.py"}]}
num_tokens: 1,691
num_tokens_diff: 336
problem_id: gh_patches_debug_22793
source: rasdani/github-patches
task_type: git_diff
in_source_id: chainer__chainer-3770
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- F.inv behaves differently between CPU/GPU Currently, F.inv ignores error flags (info) from cuBLAS. https://github.com/chainer/chainer/blob/v3.0.0/chainer/functions/math/inv.py#L56 As a result, it behaves differently between CPU/GPU. When CPU mode: ```py chainer.functions.inv(np.zeros(4, dtype=np.float32).reshape((2,2))) ``` raises exception (`numpy.linalg.linalg.LinAlgError: Singular matrix`), while GPU mode: ```py chainer.functions.inv(cp.zeros(4, dtype=np.float32).reshape((2,2))) ``` no exception occurs and ```py variable([[ nan, nan], [ nan, inf]]) ``` is returned. I think it is better to change GPU mode behavior to raise Exception, so that users can notice the error immediately. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `chainer/functions/math/inv.py` Content: ``` 1 import numpy.linalg 2 3 from chainer import cuda 4 from chainer import function_node 5 import chainer.functions 6 from chainer.functions.math import matmul 7 from chainer import utils 8 from chainer.utils import type_check 9 10 11 def _inv_gpu(b): 12 # We do a batched LU decomposition on the GPU to compute the inverse 13 # Change the shape of the array to be size=1 minibatch if necessary 14 # Also copy the matrix as the elments will be modified in-place 15 a = matmul._as_batch_mat(b).copy() 16 n = a.shape[1] 17 n_matrices = len(a) 18 # Pivot array 19 p = cuda.cupy.empty((n, n_matrices), dtype=numpy.int32) 20 # Output array 21 c = cuda.cupy.empty_like(a) 22 # These arrays hold information on the execution success 23 # or if the matrix was singular 24 info = cuda.cupy.empty(n_matrices, dtype=numpy.int32) 25 ap = matmul._mat_ptrs(a) 26 cp = matmul._mat_ptrs(c) 27 _, lda = matmul._get_ld(a) 28 _, ldc = matmul._get_ld(c) 29 handle = cuda.Device().cublas_handle 30 cuda.cublas.sgetrfBatched( 31 handle, n, ap.data.ptr, lda, p.data.ptr, info.data.ptr, n_matrices) 32 cuda.cublas.sgetriBatched( 33 handle, n, ap.data.ptr, lda, p.data.ptr, cp.data.ptr, ldc, 34 info.data.ptr, n_matrices) 35 return c, info 36 37 38 class Inv(function_node.FunctionNode): 39 40 def check_type_forward(self, in_types): 41 type_check.expect(in_types.size() == 1) 42 a_type, = in_types 43 type_check.expect(a_type.dtype == numpy.float32) 44 # Only 2D array shapes allowed 45 type_check.expect(a_type.ndim == 2) 46 # Matrix inversion only allowed for square matrices 47 type_check.expect(a_type.shape[0] == a_type.shape[1]) 48 49 def forward_cpu(self, x): 50 self.retain_outputs((0,)) 51 invx = utils.force_array(numpy.linalg.inv(x[0])) 52 return invx, 53 54 def forward_gpu(self, x): 55 self.retain_outputs((0,)) 56 shape = x[0].shape 57 invx = _inv_gpu(x[0].reshape(1, *shape))[0].reshape(shape) 58 return invx, 59 60 def backward(self, x, gy): 61 invx, = self.get_retained_outputs() 62 # Gradient is - x^-T (dx) x^-T 63 invxT = chainer.functions.transpose(invx) 64 gx = chainer.functions.matmul( 65 chainer.functions.matmul(- invxT, gy[0]), invxT) 66 return gx, 67 68 69 class BatchInv(function_node.FunctionNode): 70 71 def check_type_forward(self, in_types): 72 type_check.expect(in_types.size() == 1) 73 a_type, = in_types 74 type_check.expect(a_type.dtype == numpy.float32) 75 # Only a minibatch of 2D array shapes allowed 76 type_check.expect(a_type.ndim == 3) 77 # Matrix inversion only allowed for square matrices 78 # so assert the last two dimensions are equal 
79 type_check.expect(a_type.shape[-1] == a_type.shape[-2]) 80 81 def forward_cpu(self, x): 82 self.retain_outputs((0,)) 83 invx = utils.force_array(numpy.linalg.inv(x[0])) 84 return invx, 85 86 def forward_gpu(self, x): 87 self.retain_outputs((0,)) 88 invx, _ = _inv_gpu(x[0]) 89 return invx, 90 91 def backward(self, x, gy): 92 invx, = self.get_retained_outputs() 93 # Unpack 1-length tuples 94 gy, = gy 95 # Gradient is - x^-T (dx) x^-T 96 ret = chainer.functions.matmul(-invx, gy, transa=True) 97 ret2 = chainer.functions.matmul(ret, invx, transb=True) 98 return ret2, 99 100 101 def inv(a): 102 """Computes the inverse of square matrix. 103 104 Args: 105 a (Variable): Input array to compute the inverse for. Shape of 106 the array should be ``(n, n)`` where ``n`` is the dimensionality of 107 a square matrix. 108 109 Returns: 110 ~chainer.Variable: Matrix inverse of ``a``. 111 """ 112 return Inv().apply((a,))[0] 113 114 115 def batch_inv(a): 116 """Computes the inverse of a batch of square matrices. 117 118 Args: 119 a (Variable): Input array to compute the inverse for. Shape of 120 the array should be ``(m, n, n)`` where ``m`` is the number of 121 matrices in the batch, and ``n`` is the dimensionality of a square 122 matrix. 123 124 Returns: 125 ~chainer.Variable: Inverse of every matrix in the batch of matrices. 126 """ 127 return BatchInv().apply((a,))[0] 128 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/chainer/functions/math/inv.py b/chainer/functions/math/inv.py --- a/chainer/functions/math/inv.py +++ b/chainer/functions/math/inv.py @@ -1,5 +1,6 @@ import numpy.linalg +import chainer from chainer import cuda from chainer import function_node import chainer.functions @@ -54,7 +55,11 @@ def forward_gpu(self, x): self.retain_outputs((0,)) shape = x[0].shape - invx = _inv_gpu(x[0].reshape(1, *shape))[0].reshape(shape) + invx, info = _inv_gpu(x[0].reshape(1, *shape)) + if chainer.is_debug(): + if cuda.cupy.any(info != 0): + raise ValueError('Input has singular matrices.') + invx = invx.reshape(shape) return invx, def backward(self, x, gy): @@ -85,7 +90,10 @@ def forward_gpu(self, x): self.retain_outputs((0,)) - invx, _ = _inv_gpu(x[0]) + invx, info = _inv_gpu(x[0]) + if chainer.is_debug(): + if cuda.cupy.any(info != 0): + raise ValueError('Input has singular matrices.') return invx, def backward(self, x, gy):
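The patch reuses the `info` array that `_inv_gpu` already returned (and previously discarded): each entry is the cuBLAS/LAPACK status for one matrix in the batch, and a nonzero value means the LU factorization hit a zero pivot, i.e. a singular input. Raising on that condition is what brings the GPU path in line with NumPy's `LinAlgError` on the CPU — though only under `chainer.is_debug()`, presumably because inspecting `info` forces a device-to-host synchronization that the default path avoids. A CPU-side emulation of just the guard, with a made-up `info` array:

```python
import numpy as np

def check_getrf_info(info):
    # Nonzero status for any matrix in the batch means that matrix was singular.
    if np.any(info != 0):
        raise ValueError('Input has singular matrices.')

check_getrf_info(np.zeros(3, dtype=np.int32))  # all matrices fine, no error
try:
    check_getrf_info(np.array([0, 2, 0], dtype=np.int32))  # matrix 1 singular
except ValueError as err:
    print(err)  # -> Input has singular matrices.
```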
{"golden_diff": "diff --git a/chainer/functions/math/inv.py b/chainer/functions/math/inv.py\n--- a/chainer/functions/math/inv.py\n+++ b/chainer/functions/math/inv.py\n@@ -1,5 +1,6 @@\n import numpy.linalg\n \n+import chainer\n from chainer import cuda\n from chainer import function_node\n import chainer.functions\n@@ -54,7 +55,11 @@\n def forward_gpu(self, x):\n self.retain_outputs((0,))\n shape = x[0].shape\n- invx = _inv_gpu(x[0].reshape(1, *shape))[0].reshape(shape)\n+ invx, info = _inv_gpu(x[0].reshape(1, *shape))\n+ if chainer.is_debug():\n+ if cuda.cupy.any(info != 0):\n+ raise ValueError('Input has singular matrices.')\n+ invx = invx.reshape(shape)\n return invx,\n \n def backward(self, x, gy):\n@@ -85,7 +90,10 @@\n \n def forward_gpu(self, x):\n self.retain_outputs((0,))\n- invx, _ = _inv_gpu(x[0])\n+ invx, info = _inv_gpu(x[0])\n+ if chainer.is_debug():\n+ if cuda.cupy.any(info != 0):\n+ raise ValueError('Input has singular matrices.')\n return invx,\n \n def backward(self, x, gy):\n", "issue": "F.inv behaves differently between CPU/GPU\nCurrently, F.inv ignores error flags (info) from cuBLAS.\r\n\r\nhttps://github.com/chainer/chainer/blob/v3.0.0/chainer/functions/math/inv.py#L56\r\n\r\nAs a result, it behaves differently between CPU/GPU.\r\n\r\nWhen CPU mode:\r\n\r\n```py\r\nchainer.functions.inv(np.zeros(4, dtype=np.float32).reshape((2,2)))\r\n```\r\n\r\nraises exception (`numpy.linalg.linalg.LinAlgError: Singular matrix`), while GPU mode:\r\n\r\n```py\r\nchainer.functions.inv(cp.zeros(4, dtype=np.float32).reshape((2,2)))\r\n```\r\n\r\nno exception occurs and\r\n\r\n```py\r\nvariable([[ nan, nan],\r\n [ nan, inf]])\r\n```\r\n\r\nis returned.\r\n\r\nI think it is better to change GPU mode behavior to raise Exception, so that users can notice the error immediately.\n", "before_files": [{"content": "import numpy.linalg\n\nfrom chainer import cuda\nfrom chainer import function_node\nimport chainer.functions\nfrom chainer.functions.math import matmul\nfrom chainer import utils\nfrom chainer.utils import type_check\n\n\ndef _inv_gpu(b):\n # We do a batched LU decomposition on the GPU to compute the inverse\n # Change the shape of the array to be size=1 minibatch if necessary\n # Also copy the matrix as the elments will be modified in-place\n a = matmul._as_batch_mat(b).copy()\n n = a.shape[1]\n n_matrices = len(a)\n # Pivot array\n p = cuda.cupy.empty((n, n_matrices), dtype=numpy.int32)\n # Output array\n c = cuda.cupy.empty_like(a)\n # These arrays hold information on the execution success\n # or if the matrix was singular\n info = cuda.cupy.empty(n_matrices, dtype=numpy.int32)\n ap = matmul._mat_ptrs(a)\n cp = matmul._mat_ptrs(c)\n _, lda = matmul._get_ld(a)\n _, ldc = matmul._get_ld(c)\n handle = cuda.Device().cublas_handle\n cuda.cublas.sgetrfBatched(\n handle, n, ap.data.ptr, lda, p.data.ptr, info.data.ptr, n_matrices)\n cuda.cublas.sgetriBatched(\n handle, n, ap.data.ptr, lda, p.data.ptr, cp.data.ptr, ldc,\n info.data.ptr, n_matrices)\n return c, info\n\n\nclass Inv(function_node.FunctionNode):\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n a_type, = in_types\n type_check.expect(a_type.dtype == numpy.float32)\n # Only 2D array shapes allowed\n type_check.expect(a_type.ndim == 2)\n # Matrix inversion only allowed for square matrices\n type_check.expect(a_type.shape[0] == a_type.shape[1])\n\n def forward_cpu(self, x):\n self.retain_outputs((0,))\n invx = utils.force_array(numpy.linalg.inv(x[0]))\n return invx,\n\n def forward_gpu(self, 
x):\n self.retain_outputs((0,))\n shape = x[0].shape\n invx = _inv_gpu(x[0].reshape(1, *shape))[0].reshape(shape)\n return invx,\n\n def backward(self, x, gy):\n invx, = self.get_retained_outputs()\n # Gradient is - x^-T (dx) x^-T\n invxT = chainer.functions.transpose(invx)\n gx = chainer.functions.matmul(\n chainer.functions.matmul(- invxT, gy[0]), invxT)\n return gx,\n\n\nclass BatchInv(function_node.FunctionNode):\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n a_type, = in_types\n type_check.expect(a_type.dtype == numpy.float32)\n # Only a minibatch of 2D array shapes allowed\n type_check.expect(a_type.ndim == 3)\n # Matrix inversion only allowed for square matrices\n # so assert the last two dimensions are equal\n type_check.expect(a_type.shape[-1] == a_type.shape[-2])\n\n def forward_cpu(self, x):\n self.retain_outputs((0,))\n invx = utils.force_array(numpy.linalg.inv(x[0]))\n return invx,\n\n def forward_gpu(self, x):\n self.retain_outputs((0,))\n invx, _ = _inv_gpu(x[0])\n return invx,\n\n def backward(self, x, gy):\n invx, = self.get_retained_outputs()\n # Unpack 1-length tuples\n gy, = gy\n # Gradient is - x^-T (dx) x^-T\n ret = chainer.functions.matmul(-invx, gy, transa=True)\n ret2 = chainer.functions.matmul(ret, invx, transb=True)\n return ret2,\n\n\ndef inv(a):\n \"\"\"Computes the inverse of square matrix.\n\n Args:\n a (Variable): Input array to compute the inverse for. Shape of\n the array should be ``(n, n)`` where ``n`` is the dimensionality of\n a square matrix.\n\n Returns:\n ~chainer.Variable: Matrix inverse of ``a``.\n \"\"\"\n return Inv().apply((a,))[0]\n\n\ndef batch_inv(a):\n \"\"\"Computes the inverse of a batch of square matrices.\n\n Args:\n a (Variable): Input array to compute the inverse for. 
Shape of\n the array should be ``(m, n, n)`` where ``m`` is the number of\n matrices in the batch, and ``n`` is the dimensionality of a square\n matrix.\n\n Returns:\n ~chainer.Variable: Inverse of every matrix in the batch of matrices.\n \"\"\"\n return BatchInv().apply((a,))[0]\n", "path": "chainer/functions/math/inv.py"}], "after_files": [{"content": "import numpy.linalg\n\nimport chainer\nfrom chainer import cuda\nfrom chainer import function_node\nimport chainer.functions\nfrom chainer.functions.math import matmul\nfrom chainer import utils\nfrom chainer.utils import type_check\n\n\ndef _inv_gpu(b):\n # We do a batched LU decomposition on the GPU to compute the inverse\n # Change the shape of the array to be size=1 minibatch if necessary\n # Also copy the matrix as the elments will be modified in-place\n a = matmul._as_batch_mat(b).copy()\n n = a.shape[1]\n n_matrices = len(a)\n # Pivot array\n p = cuda.cupy.empty((n, n_matrices), dtype=numpy.int32)\n # Output array\n c = cuda.cupy.empty_like(a)\n # These arrays hold information on the execution success\n # or if the matrix was singular\n info = cuda.cupy.empty(n_matrices, dtype=numpy.int32)\n ap = matmul._mat_ptrs(a)\n cp = matmul._mat_ptrs(c)\n _, lda = matmul._get_ld(a)\n _, ldc = matmul._get_ld(c)\n handle = cuda.Device().cublas_handle\n cuda.cublas.sgetrfBatched(\n handle, n, ap.data.ptr, lda, p.data.ptr, info.data.ptr, n_matrices)\n cuda.cublas.sgetriBatched(\n handle, n, ap.data.ptr, lda, p.data.ptr, cp.data.ptr, ldc,\n info.data.ptr, n_matrices)\n return c, info\n\n\nclass Inv(function_node.FunctionNode):\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n a_type, = in_types\n type_check.expect(a_type.dtype == numpy.float32)\n # Only 2D array shapes allowed\n type_check.expect(a_type.ndim == 2)\n # Matrix inversion only allowed for square matrices\n type_check.expect(a_type.shape[0] == a_type.shape[1])\n\n def forward_cpu(self, x):\n self.retain_outputs((0,))\n invx = utils.force_array(numpy.linalg.inv(x[0]))\n return invx,\n\n def forward_gpu(self, x):\n self.retain_outputs((0,))\n shape = x[0].shape\n invx, info = _inv_gpu(x[0].reshape(1, *shape))\n if chainer.is_debug():\n if cuda.cupy.any(info != 0):\n raise ValueError('Input has singular matrices.')\n invx = invx.reshape(shape)\n return invx,\n\n def backward(self, x, gy):\n invx, = self.get_retained_outputs()\n # Gradient is - x^-T (dx) x^-T\n invxT = chainer.functions.transpose(invx)\n gx = chainer.functions.matmul(\n chainer.functions.matmul(- invxT, gy[0]), invxT)\n return gx,\n\n\nclass BatchInv(function_node.FunctionNode):\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n a_type, = in_types\n type_check.expect(a_type.dtype == numpy.float32)\n # Only a minibatch of 2D array shapes allowed\n type_check.expect(a_type.ndim == 3)\n # Matrix inversion only allowed for square matrices\n # so assert the last two dimensions are equal\n type_check.expect(a_type.shape[-1] == a_type.shape[-2])\n\n def forward_cpu(self, x):\n self.retain_outputs((0,))\n invx = utils.force_array(numpy.linalg.inv(x[0]))\n return invx,\n\n def forward_gpu(self, x):\n self.retain_outputs((0,))\n invx, info = _inv_gpu(x[0])\n if chainer.is_debug():\n if cuda.cupy.any(info != 0):\n raise ValueError('Input has singular matrices.')\n return invx,\n\n def backward(self, x, gy):\n invx, = self.get_retained_outputs()\n # Unpack 1-length tuples\n gy, = gy\n # Gradient is - x^-T (dx) x^-T\n ret = chainer.functions.matmul(-invx, gy, 
transa=True)\n ret2 = chainer.functions.matmul(ret, invx, transb=True)\n return ret2,\n\n\ndef inv(a):\n \"\"\"Computes the inverse of square matrix.\n\n Args:\n a (Variable): Input array to compute the inverse for. Shape of\n the array should be ``(n, n)`` where ``n`` is the dimensionality of\n a square matrix.\n\n Returns:\n ~chainer.Variable: Matrix inverse of ``a``.\n \"\"\"\n return Inv().apply((a,))[0]\n\n\ndef batch_inv(a):\n \"\"\"Computes the inverse of a batch of square matrices.\n\n Args:\n a (Variable): Input array to compute the inverse for. Shape of\n the array should be ``(m, n, n)`` where ``m`` is the number of\n matrices in the batch, and ``n`` is the dimensionality of a square\n matrix.\n\n Returns:\n ~chainer.Variable: Inverse of every matrix in the batch of matrices.\n \"\"\"\n return BatchInv().apply((a,))[0]\n", "path": "chainer/functions/math/inv.py"}]}
num_tokens: 1,825
num_tokens_diff: 318
problem_id: gh_patches_debug_36518
source: rasdani/github-patches
task_type: git_diff
in_source_id: joke2k__faker-924
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Enables min and max values for pydecimal Currently is not possible to set min or max values to `pydecimal` or `pyfloat`. It would be nice if we could pass these parameters. If it makes senses I can open a PR. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `faker/providers/python/__init__.py` Content: ``` 1 # coding=utf-8 2 3 from __future__ import unicode_literals 4 5 from decimal import Decimal 6 import sys 7 8 import six 9 10 from .. import BaseProvider 11 12 13 class Provider(BaseProvider): 14 def pybool(self): 15 return self.random_int(0, 1) == 1 16 17 def pystr(self, min_chars=None, max_chars=20): 18 """ 19 Generates a random string of upper and lowercase letters. 20 :type min_chars: int 21 :type max_chars: int 22 :return: String. Random of random length between min and max characters. 23 """ 24 if min_chars is None: 25 return "".join(self.random_letters(length=max_chars)) 26 else: 27 assert ( 28 max_chars >= min_chars), "Maximum length must be greater than or equal to minium length" 29 return "".join( 30 self.random_letters( 31 length=self.generator.random.randint(min_chars, max_chars), 32 ), 33 ) 34 35 def pyfloat(self, left_digits=None, right_digits=None, positive=False): 36 if left_digits is not None and left_digits < 0: 37 raise ValueError( 38 'A float number cannot have less than 0 digits in its ' 39 'integer part') 40 if right_digits is not None and right_digits < 0: 41 raise ValueError( 42 'A float number cannot have less than 0 digits in its ' 43 'fractional part') 44 if left_digits == 0 and right_digits == 0: 45 raise ValueError( 46 'A float number cannot have less than 0 digits in total') 47 48 left_digits = left_digits if left_digits is not None else ( 49 self.random_int(1, sys.float_info.dig)) 50 right_digits = right_digits if right_digits is not None else ( 51 self.random_int(0, sys.float_info.dig - left_digits)) 52 sign = 1 if positive else self.random_element((-1, 1)) 53 54 return float("{0}.{1}".format( 55 sign * self.random_number(left_digits), 56 self.random_number(right_digits), 57 )) 58 59 def pyint(self): 60 return self.generator.random_int() 61 62 def pydecimal(self, left_digits=None, right_digits=None, positive=False): 63 return Decimal(str(self.pyfloat(left_digits, right_digits, positive))) 64 65 def pytuple(self, nb_elements=10, variable_nb_elements=True, *value_types): 66 return tuple( 67 self.pyset( 68 nb_elements, 69 variable_nb_elements, 70 *value_types)) 71 72 def pyset(self, nb_elements=10, variable_nb_elements=True, *value_types): 73 return set( 74 self._pyiterable( 75 nb_elements, 76 variable_nb_elements, 77 *value_types)) 78 79 def pylist(self, nb_elements=10, variable_nb_elements=True, *value_types): 80 return list( 81 self._pyiterable( 82 nb_elements, 83 variable_nb_elements, 84 *value_types)) 85 86 def pyiterable( 87 self, 88 nb_elements=10, 89 variable_nb_elements=True, 90 *value_types): 91 return self.random_element([self.pylist, self.pytuple, self.pyset])( 92 nb_elements, variable_nb_elements, *value_types) 93 94 def _random_type(self, type_list): 95 value_type = self.random_element(type_list) 96 97 method_name = "py{0}".format(value_type) 98 if hasattr(self, method_name): 99 value_type = method_name 100 101 return self.generator.format(value_type) 102 103 def _pyiterable( 104 self, 105 nb_elements=10, 106 variable_nb_elements=True, 107 
*value_types): 108 109 value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower() 110 for t in value_types 111 # avoid recursion 112 if t not in ['iterable', 'list', 'tuple', 'dict', 'set']] 113 if not value_types: 114 value_types = ['str', 'str', 'str', 'str', 'float', 115 'int', 'int', 'decimal', 'date_time', 'uri', 'email'] 116 117 if variable_nb_elements: 118 nb_elements = self.randomize_nb_elements(nb_elements, min=1) 119 120 for _ in range(nb_elements): 121 yield self._random_type(value_types) 122 123 def pydict(self, nb_elements=10, variable_nb_elements=True, *value_types): 124 """ 125 Returns a dictionary. 126 127 :nb_elements: number of elements for dictionary 128 :variable_nb_elements: is use variable number of elements for dictionary 129 :value_types: type of dictionary values 130 """ 131 if variable_nb_elements: 132 nb_elements = self.randomize_nb_elements(nb_elements, min=1) 133 134 return dict(zip( 135 self.generator.words(nb_elements), 136 self._pyiterable(nb_elements, False, *value_types), 137 )) 138 139 def pystruct(self, count=10, *value_types): 140 141 value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower() 142 for t in value_types 143 # avoid recursion 144 if t != 'struct'] 145 if not value_types: 146 value_types = ['str', 'str', 'str', 'str', 'float', 147 'int', 'int', 'decimal', 'date_time', 'uri', 'email'] 148 149 types = [] 150 d = {} 151 nd = {} 152 for i in range(count): 153 d[self.generator.word()] = self._random_type(value_types) 154 types.append(self._random_type(value_types)) 155 nd[self.generator.word()] = {i: self._random_type(value_types), 156 i + 1: [self._random_type(value_types), 157 self._random_type(value_types), 158 self._random_type(value_types)], 159 i + 2: {i: self._random_type(value_types), 160 i + 1: self._random_type(value_types), 161 i + 2: [self._random_type(value_types), 162 self._random_type(value_types)]}} 163 return types, d, nd 164 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/faker/providers/python/__init__.py b/faker/providers/python/__init__.py --- a/faker/providers/python/__init__.py +++ b/faker/providers/python/__init__.py @@ -32,7 +32,9 @@ ), ) - def pyfloat(self, left_digits=None, right_digits=None, positive=False): + def pyfloat(self, left_digits=None, right_digits=None, positive=False, + min_value=None, max_value=None): + if left_digits is not None and left_digits < 0: raise ValueError( 'A float number cannot have less than 0 digits in its ' @@ -44,6 +46,8 @@ if left_digits == 0 and right_digits == 0: raise ValueError( 'A float number cannot have less than 0 digits in total') + if None not in (min_value, max_value) and min_value > max_value: + raise ValueError('Min value cannot be greater than max value') left_digits = left_digits if left_digits is not None else ( self.random_int(1, sys.float_info.dig)) @@ -51,16 +55,30 @@ self.random_int(0, sys.float_info.dig - left_digits)) sign = 1 if positive else self.random_element((-1, 1)) + if (min_value is not None) or (max_value is not None): + if min_value is None: + min_value = max_value - self.random_int() + if max_value is None: + max_value = min_value + self.random_int() + + left_number = self.random_int(min_value, max_value) + else: + left_number = sign * self.random_number(left_digits) + return float("{0}.{1}".format( - sign * self.random_number(left_digits), + left_number, self.random_number(right_digits), )) def pyint(self): return self.generator.random_int() - def pydecimal(self, left_digits=None, right_digits=None, positive=False): - return Decimal(str(self.pyfloat(left_digits, right_digits, positive))) + def pydecimal(self, left_digits=None, right_digits=None, positive=False, + min_value=None, max_value=None): + + float_ = self.pyfloat( + left_digits, right_digits, positive, min_value, max_value) + return Decimal(str(float_)) def pytuple(self, nb_elements=10, variable_nb_elements=True, *value_types): return tuple(
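With this change applied, both generators accept the new bounds; a short usage sketch follows (the keyword arguments only exist on a Faker build that includes the patch):

```python
from faker import Faker

fake = Faker()

price = fake.pyfloat(right_digits=2, min_value=0, max_value=100)
amount = fake.pydecimal(right_digits=2, min_value=-50, max_value=50)
print(price, amount)

# The added guard rejects inconsistent bounds:
# fake.pyfloat(min_value=10, max_value=1)
#   -> ValueError: Min value cannot be greater than max value
```

Two quirks of the patch as written are worth knowing before relying on strict ranges: the random fraction is still appended after the bounded integer part, so `max_value=100` can yield `100.99`, and the `positive` flag is effectively ignored once either bound is supplied, since the sign is no longer applied to the bounded integer.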
{"golden_diff": "diff --git a/faker/providers/python/__init__.py b/faker/providers/python/__init__.py\n--- a/faker/providers/python/__init__.py\n+++ b/faker/providers/python/__init__.py\n@@ -32,7 +32,9 @@\n ),\n )\n \n- def pyfloat(self, left_digits=None, right_digits=None, positive=False):\n+ def pyfloat(self, left_digits=None, right_digits=None, positive=False,\n+ min_value=None, max_value=None):\n+\n if left_digits is not None and left_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n@@ -44,6 +46,8 @@\n if left_digits == 0 and right_digits == 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in total')\n+ if None not in (min_value, max_value) and min_value > max_value:\n+ raise ValueError('Min value cannot be greater than max value')\n \n left_digits = left_digits if left_digits is not None else (\n self.random_int(1, sys.float_info.dig))\n@@ -51,16 +55,30 @@\n self.random_int(0, sys.float_info.dig - left_digits))\n sign = 1 if positive else self.random_element((-1, 1))\n \n+ if (min_value is not None) or (max_value is not None):\n+ if min_value is None:\n+ min_value = max_value - self.random_int()\n+ if max_value is None:\n+ max_value = min_value + self.random_int()\n+\n+ left_number = self.random_int(min_value, max_value)\n+ else:\n+ left_number = sign * self.random_number(left_digits)\n+\n return float(\"{0}.{1}\".format(\n- sign * self.random_number(left_digits),\n+ left_number,\n self.random_number(right_digits),\n ))\n \n def pyint(self):\n return self.generator.random_int()\n \n- def pydecimal(self, left_digits=None, right_digits=None, positive=False):\n- return Decimal(str(self.pyfloat(left_digits, right_digits, positive)))\n+ def pydecimal(self, left_digits=None, right_digits=None, positive=False,\n+ min_value=None, max_value=None):\n+\n+ float_ = self.pyfloat(\n+ left_digits, right_digits, positive, min_value, max_value)\n+ return Decimal(str(float_))\n \n def pytuple(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return tuple(\n", "issue": "Enables min and max values for pydecimal\nCurrently is not possible to set min or max values to `pydecimal` or `pyfloat`. It would be nice if we could pass these parameters.\r\n\r\nIf it makes senses I can open a PR.\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nfrom decimal import Decimal\nimport sys\n\nimport six\n\nfrom .. import BaseProvider\n\n\nclass Provider(BaseProvider):\n def pybool(self):\n return self.random_int(0, 1) == 1\n\n def pystr(self, min_chars=None, max_chars=20):\n \"\"\"\n Generates a random string of upper and lowercase letters.\n :type min_chars: int\n :type max_chars: int\n :return: String. 
Random of random length between min and max characters.\n \"\"\"\n if min_chars is None:\n return \"\".join(self.random_letters(length=max_chars))\n else:\n assert (\n max_chars >= min_chars), \"Maximum length must be greater than or equal to minium length\"\n return \"\".join(\n self.random_letters(\n length=self.generator.random.randint(min_chars, max_chars),\n ),\n )\n\n def pyfloat(self, left_digits=None, right_digits=None, positive=False):\n if left_digits is not None and left_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'integer part')\n if right_digits is not None and right_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'fractional part')\n if left_digits == 0 and right_digits == 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in total')\n\n left_digits = left_digits if left_digits is not None else (\n self.random_int(1, sys.float_info.dig))\n right_digits = right_digits if right_digits is not None else (\n self.random_int(0, sys.float_info.dig - left_digits))\n sign = 1 if positive else self.random_element((-1, 1))\n\n return float(\"{0}.{1}\".format(\n sign * self.random_number(left_digits),\n self.random_number(right_digits),\n ))\n\n def pyint(self):\n return self.generator.random_int()\n\n def pydecimal(self, left_digits=None, right_digits=None, positive=False):\n return Decimal(str(self.pyfloat(left_digits, right_digits, positive)))\n\n def pytuple(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return tuple(\n self.pyset(\n nb_elements,\n variable_nb_elements,\n *value_types))\n\n def pyset(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return set(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n *value_types))\n\n def pylist(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return list(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n *value_types))\n\n def pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n *value_types):\n return self.random_element([self.pylist, self.pytuple, self.pyset])(\n nb_elements, variable_nb_elements, *value_types)\n\n def _random_type(self, type_list):\n value_type = self.random_element(type_list)\n\n method_name = \"py{0}\".format(value_type)\n if hasattr(self, method_name):\n value_type = method_name\n\n return self.generator.format(value_type)\n\n def _pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n *value_types):\n\n value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]\n if not value_types:\n value_types = ['str', 'str', 'str', 'str', 'float',\n 'int', 'int', 'decimal', 'date_time', 'uri', 'email']\n\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n for _ in range(nb_elements):\n yield self._random_type(value_types)\n\n def pydict(self, nb_elements=10, variable_nb_elements=True, *value_types):\n \"\"\"\n Returns a dictionary.\n\n :nb_elements: number of elements for dictionary\n :variable_nb_elements: is use variable number of elements for dictionary\n :value_types: type of dictionary values\n \"\"\"\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n return dict(zip(\n self.generator.words(nb_elements),\n self._pyiterable(nb_elements, False, *value_types),\n ))\n\n def 
pystruct(self, count=10, *value_types):\n\n value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t != 'struct']\n if not value_types:\n value_types = ['str', 'str', 'str', 'str', 'float',\n 'int', 'int', 'decimal', 'date_time', 'uri', 'email']\n\n types = []\n d = {}\n nd = {}\n for i in range(count):\n d[self.generator.word()] = self._random_type(value_types)\n types.append(self._random_type(value_types))\n nd[self.generator.word()] = {i: self._random_type(value_types),\n i + 1: [self._random_type(value_types),\n self._random_type(value_types),\n self._random_type(value_types)],\n i + 2: {i: self._random_type(value_types),\n i + 1: self._random_type(value_types),\n i + 2: [self._random_type(value_types),\n self._random_type(value_types)]}}\n return types, d, nd\n", "path": "faker/providers/python/__init__.py"}], "after_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nfrom decimal import Decimal\nimport sys\n\nimport six\n\nfrom .. import BaseProvider\n\n\nclass Provider(BaseProvider):\n def pybool(self):\n return self.random_int(0, 1) == 1\n\n def pystr(self, min_chars=None, max_chars=20):\n \"\"\"\n Generates a random string of upper and lowercase letters.\n :type min_chars: int\n :type max_chars: int\n :return: String. Random of random length between min and max characters.\n \"\"\"\n if min_chars is None:\n return \"\".join(self.random_letters(length=max_chars))\n else:\n assert (\n max_chars >= min_chars), \"Maximum length must be greater than or equal to minium length\"\n return \"\".join(\n self.random_letters(\n length=self.generator.random.randint(min_chars, max_chars),\n ),\n )\n\n def pyfloat(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n\n if left_digits is not None and left_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'integer part')\n if right_digits is not None and right_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'fractional part')\n if left_digits == 0 and right_digits == 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in total')\n if None not in (min_value, max_value) and min_value > max_value:\n raise ValueError('Min value cannot be greater than max value')\n\n left_digits = left_digits if left_digits is not None else (\n self.random_int(1, sys.float_info.dig))\n right_digits = right_digits if right_digits is not None else (\n self.random_int(0, sys.float_info.dig - left_digits))\n sign = 1 if positive else self.random_element((-1, 1))\n\n if (min_value is not None) or (max_value is not None):\n if min_value is None:\n min_value = max_value - self.random_int()\n if max_value is None:\n max_value = min_value + self.random_int()\n\n left_number = self.random_int(min_value, max_value)\n else:\n left_number = sign * self.random_number(left_digits)\n\n return float(\"{0}.{1}\".format(\n left_number,\n self.random_number(right_digits),\n ))\n\n def pyint(self):\n return self.generator.random_int()\n\n def pydecimal(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n\n float_ = self.pyfloat(\n left_digits, right_digits, positive, min_value, max_value)\n return Decimal(str(float_))\n\n def pytuple(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return tuple(\n self.pyset(\n nb_elements,\n variable_nb_elements,\n 
*value_types))\n\n def pyset(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return set(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n *value_types))\n\n def pylist(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return list(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n *value_types))\n\n def pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n *value_types):\n return self.random_element([self.pylist, self.pytuple, self.pyset])(\n nb_elements, variable_nb_elements, *value_types)\n\n def _random_type(self, type_list):\n value_type = self.random_element(type_list)\n\n method_name = \"py{0}\".format(value_type)\n if hasattr(self, method_name):\n value_type = method_name\n\n return self.generator.format(value_type)\n\n def _pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n *value_types):\n\n value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]\n if not value_types:\n value_types = ['str', 'str', 'str', 'str', 'float',\n 'int', 'int', 'decimal', 'date_time', 'uri', 'email']\n\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n for _ in range(nb_elements):\n yield self._random_type(value_types)\n\n def pydict(self, nb_elements=10, variable_nb_elements=True, *value_types):\n \"\"\"\n Returns a dictionary.\n\n :nb_elements: number of elements for dictionary\n :variable_nb_elements: is use variable number of elements for dictionary\n :value_types: type of dictionary values\n \"\"\"\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n return dict(zip(\n self.generator.words(nb_elements),\n self._pyiterable(nb_elements, False, *value_types),\n ))\n\n def pystruct(self, count=10, *value_types):\n\n value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t != 'struct']\n if not value_types:\n value_types = ['str', 'str', 'str', 'str', 'float',\n 'int', 'int', 'decimal', 'date_time', 'uri', 'email']\n\n types = []\n d = {}\n nd = {}\n for i in range(count):\n d[self.generator.word()] = self._random_type(value_types)\n types.append(self._random_type(value_types))\n nd[self.generator.word()] = {i: self._random_type(value_types),\n i + 1: [self._random_type(value_types),\n self._random_type(value_types),\n self._random_type(value_types)],\n i + 2: {i: self._random_type(value_types),\n i + 1: self._random_type(value_types),\n i + 2: [self._random_type(value_types),\n self._random_type(value_types)]}}\n return types, d, nd\n", "path": "faker/providers/python/__init__.py"}]}
1,987
558
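For reference, the faker record above adds bounded generation to `pyfloat` and `pydecimal`. A minimal usage sketch of the patched behavior follows; it assumes the patched provider from this record is installed, and the values shown in comments are illustrative, not taken from the source:

```python
# Sketch of the bounded pyfloat/pydecimal from the golden diff above.
# Assumption: the patched faker provider is installed.
from faker import Faker

fake = Faker()

# With the patch, the integer part is drawn via random_int(min_value, max_value).
print(fake.pyfloat(right_digits=2, min_value=10, max_value=20))   # e.g. 17.42
print(fake.pydecimal(right_digits=3, min_value=-5, max_value=5))  # e.g. Decimal('-2.918')

# The added guard rejects inverted bounds.
try:
    fake.pyfloat(min_value=7, max_value=3)
except ValueError as exc:
    print(exc)  # Min value cannot be greater than max value
```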
gh_patches_debug_16100
rasdani/github-patches
git_diff
dask__distributed-4963
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add bar chart for worker occupancy We currently have bar charts for worker memory and the number of tasks processing. A similar chart to number of tasks processing would be worker occupancy, which records the sum of the amount of compute time of all tasks currently residing on the worker. This would be exactly like the number of tasks processing chart, but use `ws.occupancy` rather than `len(ws.processing)` I would have used this today when trying to diagnose work stealing issues. The relevant code for the number of processing tasks is here. It's made slightly complex because the code for this chart is currently merged with the memory chart. It might make sense to break these apart. https://github.com/dask/distributed/blob/9d4f0bf2fc804f955a869febd3b51423c4382908/distributed/dashboard/components/scheduler.py#L1017-L1139 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `distributed/dashboard/scheduler.py` Content: ``` 1 from urllib.parse import urljoin 2 3 from tornado import web 4 from tornado.ioloop import IOLoop 5 6 try: 7 import numpy as np 8 except ImportError: 9 np = False 10 11 from .components.nvml import gpu_doc # noqa: 1708 12 from .components.nvml import NVML_ENABLED, gpu_memory_doc, gpu_utilization_doc 13 from .components.scheduler import ( 14 AggregateAction, 15 BandwidthTypes, 16 BandwidthWorkers, 17 ComputePerKey, 18 CurrentLoad, 19 MemoryByKey, 20 NBytes, 21 NBytesCluster, 22 SystemMonitor, 23 TaskGraph, 24 TaskProgress, 25 TaskStream, 26 WorkerTable, 27 events_doc, 28 graph_doc, 29 individual_doc, 30 individual_profile_doc, 31 individual_profile_server_doc, 32 profile_doc, 33 profile_server_doc, 34 status_doc, 35 stealing_doc, 36 systemmonitor_doc, 37 tasks_doc, 38 workers_doc, 39 ) 40 from .core import BokehApplication 41 from .worker import counters_doc 42 43 template_variables = { 44 "pages": ["status", "workers", "tasks", "system", "profile", "graph", "info"] 45 } 46 47 if NVML_ENABLED: 48 template_variables["pages"].insert(4, "gpu") 49 50 51 def connect(application, http_server, scheduler, prefix=""): 52 bokeh_app = BokehApplication( 53 applications, scheduler, prefix=prefix, template_variables=template_variables 54 ) 55 application.add_application(bokeh_app) 56 bokeh_app.initialize(IOLoop.current()) 57 58 bokeh_app.add_handlers( 59 r".*", 60 [ 61 ( 62 r"/", 63 web.RedirectHandler, 64 {"url": urljoin((prefix or "").strip("/") + "/", r"status")}, 65 ) 66 ], 67 ) 68 69 70 applications = { 71 "/system": systemmonitor_doc, 72 "/stealing": stealing_doc, 73 "/workers": workers_doc, 74 "/events": events_doc, 75 "/counters": counters_doc, 76 "/tasks": tasks_doc, 77 "/status": status_doc, 78 "/profile": profile_doc, 79 "/profile-server": profile_server_doc, 80 "/graph": graph_doc, 81 "/gpu": gpu_doc, 82 "/individual-task-stream": individual_doc( 83 TaskStream, 100, n_rectangles=1000, clear_interval="10s" 84 ), 85 "/individual-progress": individual_doc(TaskProgress, 100, height=160), 86 "/individual-graph": individual_doc(TaskGraph, 200), 87 "/individual-nbytes": individual_doc(NBytes, 100), 88 "/individual-nbytes-cluster": individual_doc(NBytesCluster, 100), 89 "/individual-cpu": individual_doc(CurrentLoad, 100, fig_attr="cpu_figure"), 90 "/individual-nprocessing": individual_doc( 91 CurrentLoad, 100, fig_attr="processing_figure" 92 ), 93 "/individual-workers": individual_doc(WorkerTable, 500), 94 "/individual-bandwidth-types": individual_doc(BandwidthTypes, 500), 95 "/individual-bandwidth-workers": individual_doc(BandwidthWorkers, 500), 96 "/individual-memory-by-key": individual_doc(MemoryByKey, 500), 97 "/individual-compute-time-per-key": individual_doc(ComputePerKey, 500), 98 "/individual-aggregate-time-per-action": individual_doc(AggregateAction, 500), 99 "/individual-scheduler-system": individual_doc(SystemMonitor, 500), 100 "/individual-profile": individual_profile_doc, 101 "/individual-profile-server": individual_profile_server_doc, 102 "/individual-gpu-memory": gpu_memory_doc, 103 "/individual-gpu-utilization": gpu_utilization_doc, 104 } ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/distributed/dashboard/scheduler.py b/distributed/dashboard/scheduler.py --- a/distributed/dashboard/scheduler.py +++ b/distributed/dashboard/scheduler.py @@ -19,6 +19,7 @@ MemoryByKey, NBytes, NBytesCluster, + Occupancy, SystemMonitor, TaskGraph, TaskProgress, @@ -90,6 +91,7 @@ "/individual-nprocessing": individual_doc( CurrentLoad, 100, fig_attr="processing_figure" ), + "/individual-occupancy": individual_doc(Occupancy, 100), "/individual-workers": individual_doc(WorkerTable, 500), "/individual-bandwidth-types": individual_doc(BandwidthTypes, 500), "/individual-bandwidth-workers": individual_doc(BandwidthWorkers, 500),
{"golden_diff": "diff --git a/distributed/dashboard/scheduler.py b/distributed/dashboard/scheduler.py\n--- a/distributed/dashboard/scheduler.py\n+++ b/distributed/dashboard/scheduler.py\n@@ -19,6 +19,7 @@\n MemoryByKey,\n NBytes,\n NBytesCluster,\n+ Occupancy,\n SystemMonitor,\n TaskGraph,\n TaskProgress,\n@@ -90,6 +91,7 @@\n \"/individual-nprocessing\": individual_doc(\n CurrentLoad, 100, fig_attr=\"processing_figure\"\n ),\n+ \"/individual-occupancy\": individual_doc(Occupancy, 100),\n \"/individual-workers\": individual_doc(WorkerTable, 500),\n \"/individual-bandwidth-types\": individual_doc(BandwidthTypes, 500),\n \"/individual-bandwidth-workers\": individual_doc(BandwidthWorkers, 500),\n", "issue": "Add bar chart for worker occupancy\nWe currently have bar charts for worker memory and the number of tasks processing. A similar chart to number of tasks processing would be worker occupancy, which records the sum of the amount of compute time of all tasks currently residing on the worker. This would be exactly like the number of tasks processing chart, but use `ws.occupancy` rather than `len(ws.processing)`\r\n\r\nI would have used this today when trying to diagnose work stealing issues. \r\n\r\nThe relevant code for the number of processing tasks is here. It's made slightly complex because the code for this chart is currently merged with the memory chart. It might make sense to break these apart.\r\n\r\nhttps://github.com/dask/distributed/blob/9d4f0bf2fc804f955a869febd3b51423c4382908/distributed/dashboard/components/scheduler.py#L1017-L1139\n", "before_files": [{"content": "from urllib.parse import urljoin\n\nfrom tornado import web\nfrom tornado.ioloop import IOLoop\n\ntry:\n import numpy as np\nexcept ImportError:\n np = False\n\nfrom .components.nvml import gpu_doc # noqa: 1708\nfrom .components.nvml import NVML_ENABLED, gpu_memory_doc, gpu_utilization_doc\nfrom .components.scheduler import (\n AggregateAction,\n BandwidthTypes,\n BandwidthWorkers,\n ComputePerKey,\n CurrentLoad,\n MemoryByKey,\n NBytes,\n NBytesCluster,\n SystemMonitor,\n TaskGraph,\n TaskProgress,\n TaskStream,\n WorkerTable,\n events_doc,\n graph_doc,\n individual_doc,\n individual_profile_doc,\n individual_profile_server_doc,\n profile_doc,\n profile_server_doc,\n status_doc,\n stealing_doc,\n systemmonitor_doc,\n tasks_doc,\n workers_doc,\n)\nfrom .core import BokehApplication\nfrom .worker import counters_doc\n\ntemplate_variables = {\n \"pages\": [\"status\", \"workers\", \"tasks\", \"system\", \"profile\", \"graph\", \"info\"]\n}\n\nif NVML_ENABLED:\n template_variables[\"pages\"].insert(4, \"gpu\")\n\n\ndef connect(application, http_server, scheduler, prefix=\"\"):\n bokeh_app = BokehApplication(\n applications, scheduler, prefix=prefix, template_variables=template_variables\n )\n application.add_application(bokeh_app)\n bokeh_app.initialize(IOLoop.current())\n\n bokeh_app.add_handlers(\n r\".*\",\n [\n (\n r\"/\",\n web.RedirectHandler,\n {\"url\": urljoin((prefix or \"\").strip(\"/\") + \"/\", r\"status\")},\n )\n ],\n )\n\n\napplications = {\n \"/system\": systemmonitor_doc,\n \"/stealing\": stealing_doc,\n \"/workers\": workers_doc,\n \"/events\": events_doc,\n \"/counters\": counters_doc,\n \"/tasks\": tasks_doc,\n \"/status\": status_doc,\n \"/profile\": profile_doc,\n \"/profile-server\": profile_server_doc,\n \"/graph\": graph_doc,\n \"/gpu\": gpu_doc,\n \"/individual-task-stream\": individual_doc(\n TaskStream, 100, n_rectangles=1000, clear_interval=\"10s\"\n ),\n \"/individual-progress\": 
individual_doc(TaskProgress, 100, height=160),\n \"/individual-graph\": individual_doc(TaskGraph, 200),\n \"/individual-nbytes\": individual_doc(NBytes, 100),\n \"/individual-nbytes-cluster\": individual_doc(NBytesCluster, 100),\n \"/individual-cpu\": individual_doc(CurrentLoad, 100, fig_attr=\"cpu_figure\"),\n \"/individual-nprocessing\": individual_doc(\n CurrentLoad, 100, fig_attr=\"processing_figure\"\n ),\n \"/individual-workers\": individual_doc(WorkerTable, 500),\n \"/individual-bandwidth-types\": individual_doc(BandwidthTypes, 500),\n \"/individual-bandwidth-workers\": individual_doc(BandwidthWorkers, 500),\n \"/individual-memory-by-key\": individual_doc(MemoryByKey, 500),\n \"/individual-compute-time-per-key\": individual_doc(ComputePerKey, 500),\n \"/individual-aggregate-time-per-action\": individual_doc(AggregateAction, 500),\n \"/individual-scheduler-system\": individual_doc(SystemMonitor, 500),\n \"/individual-profile\": individual_profile_doc,\n \"/individual-profile-server\": individual_profile_server_doc,\n \"/individual-gpu-memory\": gpu_memory_doc,\n \"/individual-gpu-utilization\": gpu_utilization_doc,\n}\n", "path": "distributed/dashboard/scheduler.py"}], "after_files": [{"content": "from urllib.parse import urljoin\n\nfrom tornado import web\nfrom tornado.ioloop import IOLoop\n\ntry:\n import numpy as np\nexcept ImportError:\n np = False\n\nfrom .components.nvml import gpu_doc # noqa: 1708\nfrom .components.nvml import NVML_ENABLED, gpu_memory_doc, gpu_utilization_doc\nfrom .components.scheduler import (\n AggregateAction,\n BandwidthTypes,\n BandwidthWorkers,\n ComputePerKey,\n CurrentLoad,\n MemoryByKey,\n NBytes,\n NBytesCluster,\n Occupancy,\n SystemMonitor,\n TaskGraph,\n TaskProgress,\n TaskStream,\n WorkerTable,\n events_doc,\n graph_doc,\n individual_doc,\n individual_profile_doc,\n individual_profile_server_doc,\n profile_doc,\n profile_server_doc,\n status_doc,\n stealing_doc,\n systemmonitor_doc,\n tasks_doc,\n workers_doc,\n)\nfrom .core import BokehApplication\nfrom .worker import counters_doc\n\ntemplate_variables = {\n \"pages\": [\"status\", \"workers\", \"tasks\", \"system\", \"profile\", \"graph\", \"info\"]\n}\n\nif NVML_ENABLED:\n template_variables[\"pages\"].insert(4, \"gpu\")\n\n\ndef connect(application, http_server, scheduler, prefix=\"\"):\n bokeh_app = BokehApplication(\n applications, scheduler, prefix=prefix, template_variables=template_variables\n )\n application.add_application(bokeh_app)\n bokeh_app.initialize(IOLoop.current())\n\n bokeh_app.add_handlers(\n r\".*\",\n [\n (\n r\"/\",\n web.RedirectHandler,\n {\"url\": urljoin((prefix or \"\").strip(\"/\") + \"/\", r\"status\")},\n )\n ],\n )\n\n\napplications = {\n \"/system\": systemmonitor_doc,\n \"/stealing\": stealing_doc,\n \"/workers\": workers_doc,\n \"/events\": events_doc,\n \"/counters\": counters_doc,\n \"/tasks\": tasks_doc,\n \"/status\": status_doc,\n \"/profile\": profile_doc,\n \"/profile-server\": profile_server_doc,\n \"/graph\": graph_doc,\n \"/gpu\": gpu_doc,\n \"/individual-task-stream\": individual_doc(\n TaskStream, 100, n_rectangles=1000, clear_interval=\"10s\"\n ),\n \"/individual-progress\": individual_doc(TaskProgress, 100, height=160),\n \"/individual-graph\": individual_doc(TaskGraph, 200),\n \"/individual-nbytes\": individual_doc(NBytes, 100),\n \"/individual-nbytes-cluster\": individual_doc(NBytesCluster, 100),\n \"/individual-cpu\": individual_doc(CurrentLoad, 100, fig_attr=\"cpu_figure\"),\n \"/individual-nprocessing\": individual_doc(\n CurrentLoad, 100, 
fig_attr=\"processing_figure\"\n ),\n \"/individual-occupancy\": individual_doc(Occupancy, 100),\n \"/individual-workers\": individual_doc(WorkerTable, 500),\n \"/individual-bandwidth-types\": individual_doc(BandwidthTypes, 500),\n \"/individual-bandwidth-workers\": individual_doc(BandwidthWorkers, 500),\n \"/individual-memory-by-key\": individual_doc(MemoryByKey, 500),\n \"/individual-compute-time-per-key\": individual_doc(ComputePerKey, 500),\n \"/individual-aggregate-time-per-action\": individual_doc(AggregateAction, 500),\n \"/individual-scheduler-system\": individual_doc(SystemMonitor, 500),\n \"/individual-profile\": individual_profile_doc,\n \"/individual-profile-server\": individual_profile_server_doc,\n \"/individual-gpu-memory\": gpu_memory_doc,\n \"/individual-gpu-utilization\": gpu_utilization_doc,\n}\n", "path": "distributed/dashboard/scheduler.py"}]}
1,460
190
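The golden diff in the record above only registers the new `/individual-occupancy` route; the `Occupancy` component itself lives in `distributed/dashboard/components/scheduler.py`, which is not shown in this record. A hedged sketch of the quantity such a component would plot, following the issue's `ws.occupancy` description (the function name and dict layout are assumptions, not the real implementation):

```python
# Illustrative only: the per-worker quantity the occupancy bar chart plots,
# per the issue text (sum of expected compute time of tasks on each worker).
def occupancy_chart_data(scheduler):
    workers = list(scheduler.workers.values())
    return {
        "worker": [ws.address for ws in workers],
        "occupancy": [ws.occupancy for ws in workers],  # seconds of queued compute
    }
```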
gh_patches_debug_39285
rasdani/github-patches
git_diff
PrefectHQ__prefect-2310
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `ifelse` should behave like a ternary expression Currently, our conditional `ifelse` operator just sets dependencies in a particular way, but always returns `None`. It would feel much more natural if it returned a task object whose value at runtime behaved like a standard `x = condition ? if_true : if_false` expression --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/prefect/tasks/control_flow/conditional.py` Content: ``` 1 from typing import Any, Dict 2 3 import prefect 4 from prefect import Task 5 from prefect.engine import signals 6 7 __all__ = ["switch", "ifelse"] 8 9 10 class Merge(Task): 11 def __init__(self, **kwargs) -> None: 12 if kwargs.setdefault("skip_on_upstream_skip", False): 13 raise ValueError("Merge tasks must have `skip_on_upstream_skip=False`.") 14 kwargs.setdefault("trigger", prefect.triggers.not_all_skipped) 15 super().__init__(**kwargs) 16 17 def run(self, **task_results: Any) -> Any: 18 return next( 19 (v for k, v in sorted(task_results.items()) if v is not None), None, 20 ) 21 22 23 class CompareValue(Task): 24 """ 25 This task stores a `value` at initialization and compares it to a `value` received at runtime. 26 If the values don't match, it raises a SKIP exception. 27 28 Args: 29 - value (Any): the value this task will attempt to match when it runs 30 - **kwargs: keyword arguments for the Task 31 """ 32 33 def __init__(self, value: Any, **kwargs: Any): 34 self.value = value 35 kwargs.setdefault("name", 'CompareValue: "{}"'.format(value)) 36 super().__init__(**kwargs) 37 38 def run(self, value: Any) -> None: 39 """ 40 Raises a SKIP signal if the passed value does not match the task's match value; 41 succeeds silently otherwise. 42 43 Args: 44 - value (Any): the value that will be matched against the task's value. 45 """ 46 if value != self.value: 47 raise signals.SKIP( 48 'Provided value "{}" did not match "{}"'.format(value, self.value) 49 ) 50 51 52 def switch(condition: Task, cases: Dict[Any, Task]) -> None: 53 """ 54 Adds a SWITCH to a workflow. 55 56 The condition task is evaluated and the result is compared to the keys of the cases 57 dictionary. The task corresponding to the matching key is run; all other tasks are 58 skipped. Any tasks downstream of the skipped tasks are also skipped unless they set 59 `skip_on_upstream_skip=False`. 60 61 Example: 62 ```python 63 @task 64 def condition(): 65 return "b" # returning 'b' will take the b_branch 66 67 @task 68 def a_branch(): 69 return "A Branch" 70 71 @task 72 def b_branch(): 73 return "B Branch" 74 75 with Flow("switch-flow") as flow: 76 switch(condition, dict(a=a_branch, b=b_branch)) 77 ``` 78 79 Args: 80 - condition (Task): a task whose result forms the condition for the switch 81 - cases (Dict[Any, Task]): a dict representing the "case" statements of the switch. 82 The value of the `condition` task will be compared to the keys of this dict, and 83 the matching task will be executed. 84 85 Raises: 86 - PrefectWarning: if any of the tasks in "cases" have upstream dependencies, 87 then this task will warn that those upstream tasks may run whether or not the switch condition matches their branch. The most common cause of this 88 is passing a list of tasks as one of the cases, which adds the `List` task 89 to the switch condition but leaves the tasks themselves upstream. 90 """ 91 92 with prefect.tags("switch"): 93 for value, task in cases.items(): 94 task = prefect.utilities.tasks.as_task(task) 95 match_condition = CompareValue(value=value).bind(value=condition) 96 task.set_dependencies(upstream_tasks=[match_condition]) 97 98 99 def ifelse(condition: Task, true_task: Task, false_task: Task) -> None: 100 """ 101 Builds a conditional branch into a workflow. 102 103 If the condition evaluates True(ish), the true_task will run. If it 104 evaluates False(ish), the false_task will run. The task doesn't run is Skipped, as are 105 all downstream tasks that don't set `skip_on_upstream_skip=False`. 106 107 Args: 108 - condition (Task): a task whose boolean result forms the condition for the ifelse 109 - true_task (Task): a task that will be executed if the condition is True 110 - false_task (Task): a task that will be executed if the condition is False 111 """ 112 113 @prefect.task 114 def as_bool(x): 115 return bool(x) 116 117 switch(condition=as_bool(condition), cases={True: true_task, False: false_task}) 118 119 120 def merge(*tasks: Task) -> Task: 121 """ 122 Merges conditional branches back together. 123 124 A conditional branch in a flow results in one or more tasks proceeding and one or 125 more tasks skipping. It is often convenient to merge those branches back into a 126 single result. This function is a simple way to achieve that goal. By default this 127 task will skip if all its upstream dependencies are also skipped. 128 129 The merge will return the first real result it encounters, or `None`. If multiple 130 tasks might return a result, group them with a list. 131 132 Example: 133 ```python 134 with Flow("My Flow"): 135 true_branch = ActionIfTrue() 136 false_branch = ActionIfFalse() 137 ifelse(CheckCondition(), true_branch, false_branch) 138 139 merged_result = merge(true_branch, false_branch) 140 ``` 141 142 Args: 143 - *tasks (Task): tasks whose results should be merged into a single result. The tasks are 144 assumed to all sit downstream of different `switch` branches, such that only 145 one of them will contain a result and the others will all be skipped. 146 147 Returns: 148 - Task: a Task representing the merged result. 149 150 """ 151 return Merge().bind(**{"task_{}".format(i + 1): t for i, t in enumerate(tasks)}) ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/prefect/tasks/control_flow/conditional.py b/src/prefect/tasks/control_flow/conditional.py --- a/src/prefect/tasks/control_flow/conditional.py +++ b/src/prefect/tasks/control_flow/conditional.py @@ -82,11 +82,16 @@ The value of the `condition` task will be compared to the keys of this dict, and the matching task will be executed. + Returns: + - Task: a task whose result is the output from the task executed by this switch + Raises: - PrefectWarning: if any of the tasks in "cases" have upstream dependencies, - then this task will warn that those upstream tasks may run whether or not the switch condition matches their branch. The most common cause of this - is passing a list of tasks as one of the cases, which adds the `List` task - to the switch condition but leaves the tasks themselves upstream. + then this task will warn that those upstream tasks may run whether + or not the switch condition matches their branch. The most common + cause of this is passing a list of tasks as one of the cases, which + adds the `List` task to the switch condition but leaves the tasks + themselves upstream. """ with prefect.tags("switch"): @@ -94,6 +99,7 @@ task = prefect.utilities.tasks.as_task(task) match_condition = CompareValue(value=value).bind(value=condition) task.set_dependencies(upstream_tasks=[match_condition]) + return merge(*cases.values()) def ifelse(condition: Task, true_task: Task, false_task: Task) -> None: @@ -101,20 +107,26 @@ Builds a conditional branch into a workflow. If the condition evaluates True(ish), the true_task will run. If it - evaluates False(ish), the false_task will run. The task doesn't run is Skipped, as are - all downstream tasks that don't set `skip_on_upstream_skip=False`. + evaluates False(ish), the false_task will run. The task that doesn't run is + Skipped, as are all downstream tasks that don't set + `skip_on_upstream_skip=False`. Args: - condition (Task): a task whose boolean result forms the condition for the ifelse - true_task (Task): a task that will be executed if the condition is True - false_task (Task): a task that will be executed if the condition is False + + Returns: + - Task: a task whose result is the output from the task executed by this ifelse """ @prefect.task def as_bool(x): return bool(x) - switch(condition=as_bool(condition), cases={True: true_task, False: false_task}) + return switch( + condition=as_bool(condition), cases={True: true_task, False: false_task} + ) def merge(*tasks: Task) -> Task:
{"golden_diff": "diff --git a/src/prefect/tasks/control_flow/conditional.py b/src/prefect/tasks/control_flow/conditional.py\n--- a/src/prefect/tasks/control_flow/conditional.py\n+++ b/src/prefect/tasks/control_flow/conditional.py\n@@ -82,11 +82,16 @@\n The value of the `condition` task will be compared to the keys of this dict, and\n the matching task will be executed.\n \n+ Returns:\n+ - Task: a task whose result is the output from the task executed by this switch\n+\n Raises:\n - PrefectWarning: if any of the tasks in \"cases\" have upstream dependencies,\n- then this task will warn that those upstream tasks may run whether or not the switch condition matches their branch. The most common cause of this\n- is passing a list of tasks as one of the cases, which adds the `List` task\n- to the switch condition but leaves the tasks themselves upstream.\n+ then this task will warn that those upstream tasks may run whether\n+ or not the switch condition matches their branch. The most common\n+ cause of this is passing a list of tasks as one of the cases, which\n+ adds the `List` task to the switch condition but leaves the tasks\n+ themselves upstream.\n \"\"\"\n \n with prefect.tags(\"switch\"):\n@@ -94,6 +99,7 @@\n task = prefect.utilities.tasks.as_task(task)\n match_condition = CompareValue(value=value).bind(value=condition)\n task.set_dependencies(upstream_tasks=[match_condition])\n+ return merge(*cases.values())\n \n \n def ifelse(condition: Task, true_task: Task, false_task: Task) -> None:\n@@ -101,20 +107,26 @@\n Builds a conditional branch into a workflow.\n \n If the condition evaluates True(ish), the true_task will run. If it\n- evaluates False(ish), the false_task will run. The task doesn't run is Skipped, as are\n- all downstream tasks that don't set `skip_on_upstream_skip=False`.\n+ evaluates False(ish), the false_task will run. The task that doesn't run is\n+ Skipped, as are all downstream tasks that don't set\n+ `skip_on_upstream_skip=False`.\n \n Args:\n - condition (Task): a task whose boolean result forms the condition for the ifelse\n - true_task (Task): a task that will be executed if the condition is True\n - false_task (Task): a task that will be executed if the condition is False\n+\n+ Returns:\n+ - Task: a task whose result is the output from the task executed by this ifelse\n \"\"\"\n \n @prefect.task\n def as_bool(x):\n return bool(x)\n \n- switch(condition=as_bool(condition), cases={True: true_task, False: false_task})\n+ return switch(\n+ condition=as_bool(condition), cases={True: true_task, False: false_task}\n+ )\n \n \n def merge(*tasks: Task) -> Task:\n", "issue": "`ifelse` should behave like a ternary expression\nCurrently, our conditional `ifelse` operator just sets dependencies in a particular way, but always returns `None`. It would feel much more natural if it returned a task object whose value at runtime behaved like a standard `x = condition ? 
if_true : if_false` expression\n", "before_files": [{"content": "from typing import Any, Dict\n\nimport prefect\nfrom prefect import Task\nfrom prefect.engine import signals\n\n__all__ = [\"switch\", \"ifelse\"]\n\n\nclass Merge(Task):\n def __init__(self, **kwargs) -> None:\n if kwargs.setdefault(\"skip_on_upstream_skip\", False):\n raise ValueError(\"Merge tasks must have `skip_on_upstream_skip=False`.\")\n kwargs.setdefault(\"trigger\", prefect.triggers.not_all_skipped)\n super().__init__(**kwargs)\n\n def run(self, **task_results: Any) -> Any:\n return next(\n (v for k, v in sorted(task_results.items()) if v is not None), None,\n )\n\n\nclass CompareValue(Task):\n \"\"\"\n This task stores a `value` at initialization and compares it to a `value` received at runtime.\n If the values don't match, it raises a SKIP exception.\n\n Args:\n - value (Any): the value this task will attempt to match when it runs\n - **kwargs: keyword arguments for the Task\n \"\"\"\n\n def __init__(self, value: Any, **kwargs: Any):\n self.value = value\n kwargs.setdefault(\"name\", 'CompareValue: \"{}\"'.format(value))\n super().__init__(**kwargs)\n\n def run(self, value: Any) -> None:\n \"\"\"\n Raises a SKIP signal if the passed value does not match the task's match value;\n succeeds silently otherwise.\n\n Args:\n - value (Any): the value that will be matched against the task's value.\n \"\"\"\n if value != self.value:\n raise signals.SKIP(\n 'Provided value \"{}\" did not match \"{}\"'.format(value, self.value)\n )\n\n\ndef switch(condition: Task, cases: Dict[Any, Task]) -> None:\n \"\"\"\n Adds a SWITCH to a workflow.\n\n The condition task is evaluated and the result is compared to the keys of the cases\n dictionary. The task corresponding to the matching key is run; all other tasks are\n skipped. Any tasks downstream of the skipped tasks are also skipped unless they set\n `skip_on_upstream_skip=False`.\n\n Example:\n ```python\n @task\n def condition():\n return \"b\" # returning 'b' will take the b_branch\n\n @task\n def a_branch():\n return \"A Branch\"\n\n @task\n def b_branch():\n return \"B Branch\"\n\n with Flow(\"switch-flow\") as flow:\n switch(condition, dict(a=a_branch, b=b_branch))\n ```\n\n Args:\n - condition (Task): a task whose result forms the condition for the switch\n - cases (Dict[Any, Task]): a dict representing the \"case\" statements of the switch.\n The value of the `condition` task will be compared to the keys of this dict, and\n the matching task will be executed.\n\n Raises:\n - PrefectWarning: if any of the tasks in \"cases\" have upstream dependencies,\n then this task will warn that those upstream tasks may run whether or not the switch condition matches their branch. The most common cause of this\n is passing a list of tasks as one of the cases, which adds the `List` task\n to the switch condition but leaves the tasks themselves upstream.\n \"\"\"\n\n with prefect.tags(\"switch\"):\n for value, task in cases.items():\n task = prefect.utilities.tasks.as_task(task)\n match_condition = CompareValue(value=value).bind(value=condition)\n task.set_dependencies(upstream_tasks=[match_condition])\n\n\ndef ifelse(condition: Task, true_task: Task, false_task: Task) -> None:\n \"\"\"\n Builds a conditional branch into a workflow.\n\n If the condition evaluates True(ish), the true_task will run. If it\n evaluates False(ish), the false_task will run. 
The task doesn't run is Skipped, as are\n all downstream tasks that don't set `skip_on_upstream_skip=False`.\n\n Args:\n - condition (Task): a task whose boolean result forms the condition for the ifelse\n - true_task (Task): a task that will be executed if the condition is True\n - false_task (Task): a task that will be executed if the condition is False\n \"\"\"\n\n @prefect.task\n def as_bool(x):\n return bool(x)\n\n switch(condition=as_bool(condition), cases={True: true_task, False: false_task})\n\n\ndef merge(*tasks: Task) -> Task:\n \"\"\"\n Merges conditional branches back together.\n\n A conditional branch in a flow results in one or more tasks proceeding and one or\n more tasks skipping. It is often convenient to merge those branches back into a\n single result. This function is a simple way to achieve that goal. By default this\n task will skip if all its upstream dependencies are also skipped.\n\n The merge will return the first real result it encounters, or `None`. If multiple\n tasks might return a result, group them with a list.\n\n Example:\n ```python\n with Flow(\"My Flow\"):\n true_branch = ActionIfTrue()\n false_branch = ActionIfFalse()\n ifelse(CheckCondition(), true_branch, false_branch)\n\n merged_result = merge(true_branch, false_branch)\n ```\n\n Args:\n - *tasks (Task): tasks whose results should be merged into a single result. The tasks are\n assumed to all sit downstream of different `switch` branches, such that only\n one of them will contain a result and the others will all be skipped.\n\n Returns:\n - Task: a Task representing the merged result.\n\n \"\"\"\n return Merge().bind(**{\"task_{}\".format(i + 1): t for i, t in enumerate(tasks)})\n", "path": "src/prefect/tasks/control_flow/conditional.py"}], "after_files": [{"content": "from typing import Any, Dict\n\nimport prefect\nfrom prefect import Task\nfrom prefect.engine import signals\n\n__all__ = [\"switch\", \"ifelse\"]\n\n\nclass Merge(Task):\n def __init__(self, **kwargs) -> None:\n if kwargs.setdefault(\"skip_on_upstream_skip\", False):\n raise ValueError(\"Merge tasks must have `skip_on_upstream_skip=False`.\")\n kwargs.setdefault(\"trigger\", prefect.triggers.not_all_skipped)\n super().__init__(**kwargs)\n\n def run(self, **task_results: Any) -> Any:\n return next(\n (v for k, v in sorted(task_results.items()) if v is not None), None,\n )\n\n\nclass CompareValue(Task):\n \"\"\"\n This task stores a `value` at initialization and compares it to a `value` received at runtime.\n If the values don't match, it raises a SKIP exception.\n\n Args:\n - value (Any): the value this task will attempt to match when it runs\n - **kwargs: keyword arguments for the Task\n \"\"\"\n\n def __init__(self, value: Any, **kwargs: Any):\n self.value = value\n kwargs.setdefault(\"name\", 'CompareValue: \"{}\"'.format(value))\n super().__init__(**kwargs)\n\n def run(self, value: Any) -> None:\n \"\"\"\n Raises a SKIP signal if the passed value does not match the task's match value;\n succeeds silently otherwise.\n\n Args:\n - value (Any): the value that will be matched against the task's value.\n \"\"\"\n if value != self.value:\n raise signals.SKIP(\n 'Provided value \"{}\" did not match \"{}\"'.format(value, self.value)\n )\n\n\ndef switch(condition: Task, cases: Dict[Any, Task]) -> None:\n \"\"\"\n Adds a SWITCH to a workflow.\n\n The condition task is evaluated and the result is compared to the keys of the cases\n dictionary. The task corresponding to the matching key is run; all other tasks are\n skipped. 
Any tasks downstream of the skipped tasks are also skipped unless they set\n `skip_on_upstream_skip=False`.\n\n Example:\n ```python\n @task\n def condition():\n return \"b\" # returning 'b' will take the b_branch\n\n @task\n def a_branch():\n return \"A Branch\"\n\n @task\n def b_branch():\n return \"B Branch\"\n\n with Flow(\"switch-flow\") as flow:\n switch(condition, dict(a=a_branch, b=b_branch))\n ```\n\n Args:\n - condition (Task): a task whose result forms the condition for the switch\n - cases (Dict[Any, Task]): a dict representing the \"case\" statements of the switch.\n The value of the `condition` task will be compared to the keys of this dict, and\n the matching task will be executed.\n\n Returns:\n - Task: a task whose result is the output from the task executed by this switch\n\n Raises:\n - PrefectWarning: if any of the tasks in \"cases\" have upstream dependencies,\n then this task will warn that those upstream tasks may run whether\n or not the switch condition matches their branch. The most common\n cause of this is passing a list of tasks as one of the cases, which\n adds the `List` task to the switch condition but leaves the tasks\n themselves upstream.\n \"\"\"\n\n with prefect.tags(\"switch\"):\n for value, task in cases.items():\n task = prefect.utilities.tasks.as_task(task)\n match_condition = CompareValue(value=value).bind(value=condition)\n task.set_dependencies(upstream_tasks=[match_condition])\n return merge(*cases.values())\n\n\ndef ifelse(condition: Task, true_task: Task, false_task: Task) -> None:\n \"\"\"\n Builds a conditional branch into a workflow.\n\n If the condition evaluates True(ish), the true_task will run. If it\n evaluates False(ish), the false_task will run. The task that doesn't run is\n Skipped, as are all downstream tasks that don't set\n `skip_on_upstream_skip=False`.\n\n Args:\n - condition (Task): a task whose boolean result forms the condition for the ifelse\n - true_task (Task): a task that will be executed if the condition is True\n - false_task (Task): a task that will be executed if the condition is False\n\n Returns:\n - Task: a task whose result is the output from the task executed by this ifelse\n \"\"\"\n\n @prefect.task\n def as_bool(x):\n return bool(x)\n\n return switch(\n condition=as_bool(condition), cases={True: true_task, False: false_task}\n )\n\n\ndef merge(*tasks: Task) -> Task:\n \"\"\"\n Merges conditional branches back together.\n\n A conditional branch in a flow results in one or more tasks proceeding and one or\n more tasks skipping. It is often convenient to merge those branches back into a\n single result. This function is a simple way to achieve that goal. By default this\n task will skip if all its upstream dependencies are also skipped.\n\n The merge will return the first real result it encounters, or `None`. If multiple\n tasks might return a result, group them with a list.\n\n Example:\n ```python\n with Flow(\"My Flow\"):\n true_branch = ActionIfTrue()\n false_branch = ActionIfFalse()\n ifelse(CheckCondition(), true_branch, false_branch)\n\n merged_result = merge(true_branch, false_branch)\n ```\n\n Args:\n - *tasks (Task): tasks whose results should be merged into a single result. 
The tasks are\n assumed to all sit downstream of different `switch` branches, such that only\n one of them will contain a result and the others will all be skipped.\n\n Returns:\n - Task: a Task representing the merged result.\n\n \"\"\"\n return Merge().bind(**{\"task_{}\".format(i + 1): t for i, t in enumerate(tasks)})\n", "path": "src/prefect/tasks/control_flow/conditional.py"}]}
1,921
665
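A short usage sketch of the patched `ifelse`, mirroring the docstring example already in the record (assumes the patched prefect from this record; task names are illustrative):

```python
# With the patch, ifelse returns the merge task, so it can be assigned
# directly -- behaving like `x = condition ? if_true : if_false` at runtime.
from prefect import Flow, task
from prefect.tasks.control_flow.conditional import ifelse

@task
def check_condition():
    return True

@task
def action_if_true():
    return "true branch"

@task
def action_if_false():
    return "false branch"

with Flow("ternary-ifelse") as flow:
    result = ifelse(check_condition, action_if_true, action_if_false)
```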
gh_patches_debug_50578
rasdani/github-patches
git_diff
streamlit__streamlit-4076
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Slider value visually overlaps with the edge of the sidebar # Summary The value label collides with the edges of the sidebar. It should change alignment when it gets close to the edges. # Steps to reproduce ```py min_weight = st.sidebar.slider("Minimum weight", 2500, 6500) ``` ## Expected behavior: The value label should not collide with the edges. ## Actual behavior: <img width="382" alt="Screen Shot 2020-09-30 at 22 42 44" src="https://user-images.githubusercontent.com/589034/94772484-97067200-036e-11eb-9f82-10453aa7452e.png"> ## Is this a regression? no # Debug info - Streamlit version: 0.65.2 - Browser version: Chrome --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `e2e/scripts/st_slider.py` Content: ``` 1 # Copyright 2018-2021 Streamlit Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import streamlit as st 16 17 w1 = st.slider("Label 1", 0, 100, 25, 1) 18 st.write("Value 1:", w1) 19 20 w2 = st.slider("Label 2", 0.0, 100.0, (25.0, 75.0), 0.5) 21 st.write("Value 2:", w2) 22 23 w3 = st.slider( 24 "Label 3 - This is a very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very long label", 25 0, 26 100, 27 1, 28 1, 29 ) 30 st.write("Value 3:", w3) 31 32 if st._is_running_with_streamlit: 33 34 def on_change(): 35 st.session_state.slider_changed = True 36 37 st.slider( 38 "Label 4", 39 min_value=0, 40 max_value=100, 41 value=25, 42 step=1, 43 key="slider4", 44 on_change=on_change, 45 ) 46 st.write("Value 4:", st.session_state.slider4) 47 st.write("Slider changed:", "slider_changed" in st.session_state) 48 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/e2e/scripts/st_slider.py b/e2e/scripts/st_slider.py --- a/e2e/scripts/st_slider.py +++ b/e2e/scripts/st_slider.py @@ -14,6 +14,13 @@ import streamlit as st +s1 = st.sidebar.slider("Label A", 0, 12345678, 12345678) +st.sidebar.write("Value A:", s1) + +with st.sidebar.expander("Expander"): + s2 = st.slider("Label B", 10000, 25000, 10000) + st.write("Value B:", s2) + w1 = st.slider("Label 1", 0, 100, 25, 1) st.write("Value 1:", w1)
{"golden_diff": "diff --git a/e2e/scripts/st_slider.py b/e2e/scripts/st_slider.py\n--- a/e2e/scripts/st_slider.py\n+++ b/e2e/scripts/st_slider.py\n@@ -14,6 +14,13 @@\n \n import streamlit as st\n \n+s1 = st.sidebar.slider(\"Label A\", 0, 12345678, 12345678)\n+st.sidebar.write(\"Value A:\", s1)\n+\n+with st.sidebar.expander(\"Expander\"):\n+ s2 = st.slider(\"Label B\", 10000, 25000, 10000)\n+ st.write(\"Value B:\", s2)\n+\n w1 = st.slider(\"Label 1\", 0, 100, 25, 1)\n st.write(\"Value 1:\", w1)\n", "issue": "Slider value visually overlaps with the edge of the sidebar\n# Summary\r\n\r\nThe value label collides with the edges of the sidebar. It should change alignment when it gets close to the edges. \r\n\r\n# Steps to reproduce\r\n\r\n```py\r\nmin_weight = st.sidebar.slider(\"Minimum weight\", 2500, 6500)\r\n```\r\n\r\n## Expected behavior:\r\n\r\nThe value label should not collide with the edges. \r\n\r\n## Actual behavior:\r\n\r\n<img width=\"382\" alt=\"Screen Shot 2020-09-30 at 22 42 44\" src=\"https://user-images.githubusercontent.com/589034/94772484-97067200-036e-11eb-9f82-10453aa7452e.png\">\r\n\r\n\r\n## Is this a regression?\r\n\r\nno\r\n\r\n# Debug info\r\n\r\n- Streamlit version: 0.65.2\r\n- Browser version: Chrome\r\n\n", "before_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nw1 = st.slider(\"Label 1\", 0, 100, 25, 1)\nst.write(\"Value 1:\", w1)\n\nw2 = st.slider(\"Label 2\", 0.0, 100.0, (25.0, 75.0), 0.5)\nst.write(\"Value 2:\", w2)\n\nw3 = st.slider(\n \"Label 3 - This is a very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very long label\",\n 0,\n 100,\n 1,\n 1,\n)\nst.write(\"Value 3:\", w3)\n\nif st._is_running_with_streamlit:\n\n def on_change():\n st.session_state.slider_changed = True\n\n st.slider(\n \"Label 4\",\n min_value=0,\n max_value=100,\n value=25,\n step=1,\n key=\"slider4\",\n on_change=on_change,\n )\n st.write(\"Value 4:\", st.session_state.slider4)\n st.write(\"Slider changed:\", \"slider_changed\" in st.session_state)\n", "path": "e2e/scripts/st_slider.py"}], "after_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\ns1 = st.sidebar.slider(\"Label A\", 0, 12345678, 12345678)\nst.sidebar.write(\"Value A:\", s1)\n\nwith 
st.sidebar.expander(\"Expander\"):\n s2 = st.slider(\"Label B\", 10000, 25000, 10000)\n st.write(\"Value B:\", s2)\n\nw1 = st.slider(\"Label 1\", 0, 100, 25, 1)\nst.write(\"Value 1:\", w1)\n\nw2 = st.slider(\"Label 2\", 0.0, 100.0, (25.0, 75.0), 0.5)\nst.write(\"Value 2:\", w2)\n\nw3 = st.slider(\n \"Label 3 - This is a very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very long label\",\n 0,\n 100,\n 1,\n 1,\n)\nst.write(\"Value 3:\", w3)\n\nif st._is_running_with_streamlit:\n\n def on_change():\n st.session_state.slider_changed = True\n\n st.slider(\n \"Label 4\",\n min_value=0,\n max_value=100,\n value=25,\n step=1,\n key=\"slider4\",\n on_change=on_change,\n )\n st.write(\"Value 4:\", st.session_state.slider4)\n st.write(\"Slider changed:\", \"slider_changed\" in st.session_state)\n", "path": "e2e/scripts/st_slider.py"}]}
994
194
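A minimal reproduction of the reported overlap, combining the issue's snippet with the sidebar cases the golden diff adds to the e2e script (assumes a normal `streamlit run` context):

```python
# Repro sketch: long value labels near the sidebar edges used to clip.
import streamlit as st

min_weight = st.sidebar.slider("Minimum weight", 2500, 6500)

# Edge cases from the patched e2e script: a max-range value and an expander.
s1 = st.sidebar.slider("Label A", 0, 12345678, 12345678)
with st.sidebar.expander("Expander"):
    s2 = st.slider("Label B", 10000, 25000, 10000)
```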
gh_patches_debug_14317
rasdani/github-patches
git_diff
keras-team__keras-18911
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error in Documentation The documentation confuses the ‘channels_last’ input format. It needs to be changed to match the correct format ==> (batch_size, height, width, channels) https://github.com/keras-team/keras/blob/037ec9f5fc61a53c6e1f4c02b7bf1443429dcd45/keras/layers/convolutional/conv2d_transpose.py#L35 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `keras/layers/convolutional/conv2d_transpose.py` Content: ``` 1 from keras.api_export import keras_export 2 from keras.layers.convolutional.base_conv_transpose import BaseConvTranspose 3 4 5 @keras_export( 6 [ 7 "keras.layers.Conv2DTranspose", 8 "keras.layers.Convolution2DTranspose", 9 ] 10 ) 11 class Conv2DTranspose(BaseConvTranspose): 12 """2D transposed convolution layer. 13 14 The need for transposed convolutions generally arise from the desire to use 15 a transformation going in the opposite direction of a normal convolution, 16 i.e., from something that has the shape of the output of some convolution 17 to something that has the shape of its input while maintaining a 18 connectivity pattern that is compatible with said convolution. 19 20 Args: 21 filters: int, the dimension of the output space (the number of filters 22 in the transposed convolution). 23 kernel_size: int or tuple/list of 1 integer, specifying the size of the 24 transposed convolution window. 25 strides: int or tuple/list of 1 integer, specifying the stride length 26 of the transposed convolution. `strides > 1` is incompatible with 27 `dilation_rate > 1`. 28 padding: string, either `"valid"` or `"same"` (case-insensitive). 29 `"valid"` means no padding. `"same"` results in padding evenly to 30 the left/right or up/down of the input. When `padding="same"` and 31 `strides=1`, the output has the same size as the input. 32 data_format: string, either `"channels_last"` or `"channels_first"`. 33 The ordering of the dimensions in the inputs. `"channels_last"` 34 corresponds to inputs with shape 35 `(batch_size, channels, height, width)` 36 while `"channels_first"` corresponds to inputs with shape 37 `(batch_size, channels, height, width)`. It defaults to the 38 `image_data_format` value found in your Keras config file at 39 `~/.keras/keras.json`. If you never set it, then it will be 40 `"channels_last"`. 41 dilation_rate: int or tuple/list of 1 integers, specifying the dilation 42 rate to use for dilated transposed convolution. 43 activation: Activation function. If `None`, no activation is applied. 44 use_bias: bool, if `True`, bias will be added to the output. 45 kernel_initializer: Initializer for the convolution kernel. If `None`, 46 the default initializer (`"glorot_uniform"`) will be used. 47 bias_initializer: Initializer for the bias vector. If `None`, the 48 default initializer (`"zeros"`) will be used. 49 kernel_regularizer: Optional regularizer for the convolution kernel. 50 bias_regularizer: Optional regularizer for the bias vector. 51 activity_regularizer: Optional regularizer function for the output. 52 kernel_constraint: Optional projection function to be applied to the 53 kernel after being updated by an `Optimizer` (e.g. used to implement 54 norm constraints or value constraints for layer weights). The 55 function must take as input the unprojected variable and must return 56 the projected variable (which must have the same shape). Constraints 57 are not safe to use when doing asynchronous distributed training. 58 bias_constraint: Optional projection function to be applied to the 59 bias after being updated by an `Optimizer`. 60 61 Input shape: 62 - If `data_format="channels_last"`: 63 A 4D tensor with shape: `(batch_size, height, width, channels)` 64 - If `data_format="channels_first"`: 65 A 4D tensor with shape: `(batch_size, channels, height, width)` 66 67 Output shape: 68 - If `data_format="channels_last"`: 69 A 4D tensor with shape: `(batch_size, new_height, new_width, filters)` 70 - If `data_format="channels_first"`: 71 A 4D tensor with shape: `(batch_size, filters, new_height, new_width)` 72 73 Returns: 74 A 4D tensor representing 75 `activation(conv2d_transpose(inputs, kernel) + bias)`. 76 77 Raises: 78 ValueError: when both `strides > 1` and `dilation_rate > 1`. 79 80 References: 81 - [A guide to convolution arithmetic for deep learning]( 82 https://arxiv.org/abs/1603.07285v1) 83 - [Deconvolutional Networks]( 84 https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf) 85 86 Examples: 87 88 >>> x = np.random.rand(4, 10, 8, 128) 89 >>> y = keras.layers.Conv2DTranspose(32, 2, 2, activation='relu')(x) 90 >>> print(y.shape) 91 (4, 20, 16, 32) 92 """ 93 94 def __init__( 95 self, 96 filters, 97 kernel_size, 98 strides=(1, 1), 99 padding="valid", 100 data_format=None, 101 dilation_rate=(1, 1), 102 activation=None, 103 use_bias=True, 104 kernel_initializer="glorot_uniform", 105 bias_initializer="zeros", 106 kernel_regularizer=None, 107 bias_regularizer=None, 108 activity_regularizer=None, 109 kernel_constraint=None, 110 bias_constraint=None, 111 **kwargs 112 ): 113 super().__init__( 114 rank=2, 115 filters=filters, 116 kernel_size=kernel_size, 117 strides=strides, 118 padding=padding, 119 data_format=data_format, 120 dilation_rate=dilation_rate, 121 activation=activation, 122 use_bias=use_bias, 123 kernel_initializer=kernel_initializer, 124 bias_initializer=bias_initializer, 125 kernel_regularizer=kernel_regularizer, 126 bias_regularizer=bias_regularizer, 127 activity_regularizer=activity_regularizer, 128 kernel_constraint=kernel_constraint, 129 bias_constraint=bias_constraint, 130 **kwargs 131 ) ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/keras/layers/convolutional/conv2d_transpose.py b/keras/layers/convolutional/conv2d_transpose.py --- a/keras/layers/convolutional/conv2d_transpose.py +++ b/keras/layers/convolutional/conv2d_transpose.py @@ -32,7 +32,7 @@ data_format: string, either `"channels_last"` or `"channels_first"`. The ordering of the dimensions in the inputs. `"channels_last"` corresponds to inputs with shape - `(batch_size, channels, height, width)` + `(batch_size, height, width, channels)` while `"channels_first"` corresponds to inputs with shape `(batch_size, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at
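As a quick cross-check of the corrected `channels_last` wording, the sketch below reuses the docstring's own example values; it is a minimal sketch assuming Keras 3 and NumPy are installed.

```python
import numpy as np
from keras import layers

# channels_last means (batch_size, height, width, channels): 4 samples,
# 10x8 spatial dims, 128 input channels, exactly as in the docstring example.
x = np.random.rand(4, 10, 8, 128)
y = layers.Conv2DTranspose(32, 2, 2, activation="relu")(x)
print(y.shape)  # (4, 20, 16, 32): stride 2 doubles height/width, filters land last
```

The channels sitting on the trailing axis is what made `(batch_size, channels, height, width)` the wrong shape for `"channels_last"` in the original docstring.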
{"golden_diff": "diff --git a/keras/layers/convolutional/conv2d_transpose.py b/keras/layers/convolutional/conv2d_transpose.py\n--- a/keras/layers/convolutional/conv2d_transpose.py\n+++ b/keras/layers/convolutional/conv2d_transpose.py\n@@ -32,7 +32,7 @@\n data_format: string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape\n- `(batch_size, channels, height, width)`\n+ `(batch_size, height, width, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch_size, channels, height, width)`. It defaults to the\n `image_data_format` value found in your Keras config file at\n", "issue": "Error in Documentation\nThe documentation confuses the \u2018channels_last\u2019 input format. It needs to be changed to match the correct format ==> (batch_size, height, width, channels)\r\n\r\nhttps://github.com/keras-team/keras/blob/037ec9f5fc61a53c6e1f4c02b7bf1443429dcd45/keras/layers/convolutional/conv2d_transpose.py#L35\n", "before_files": [{"content": "from keras.api_export import keras_export\nfrom keras.layers.convolutional.base_conv_transpose import BaseConvTranspose\n\n\n@keras_export(\n [\n \"keras.layers.Conv2DTranspose\",\n \"keras.layers.Convolution2DTranspose\",\n ]\n)\nclass Conv2DTranspose(BaseConvTranspose):\n \"\"\"2D transposed convolution layer.\n\n The need for transposed convolutions generally arise from the desire to use\n a transformation going in the opposite direction of a normal convolution,\n i.e., from something that has the shape of the output of some convolution\n to something that has the shape of its input while maintaining a\n connectivity pattern that is compatible with said convolution.\n\n Args:\n filters: int, the dimension of the output space (the number of filters\n in the transposed convolution).\n kernel_size: int or tuple/list of 1 integer, specifying the size of the\n transposed convolution window.\n strides: int or tuple/list of 1 integer, specifying the stride length\n of the transposed convolution. `strides > 1` is incompatible with\n `dilation_rate > 1`.\n padding: string, either `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input. When `padding=\"same\"` and\n `strides=1`, the output has the same size as the input.\n data_format: string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch_size, channels, height, width)`. It defaults to the\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json`. If you never set it, then it will be\n `\"channels_last\"`.\n dilation_rate: int or tuple/list of 1 integers, specifying the dilation\n rate to use for dilated transposed convolution.\n activation: Activation function. If `None`, no activation is applied.\n use_bias: bool, if `True`, bias will be added to the output.\n kernel_initializer: Initializer for the convolution kernel. If `None`,\n the default initializer (`\"glorot_uniform\"`) will be used.\n bias_initializer: Initializer for the bias vector. 
If `None`, the\n default initializer (`\"zeros\"`) will be used.\n kernel_regularizer: Optional regularizer for the convolution kernel.\n bias_regularizer: Optional regularizer for the bias vector.\n activity_regularizer: Optional regularizer function for the output.\n kernel_constraint: Optional projection function to be applied to the\n kernel after being updated by an `Optimizer` (e.g. used to implement\n norm constraints or value constraints for layer weights). The\n function must take as input the unprojected variable and must return\n the projected variable (which must have the same shape). Constraints\n are not safe to use when doing asynchronous distributed training.\n bias_constraint: Optional projection function to be applied to the\n bias after being updated by an `Optimizer`.\n\n Input shape:\n - If `data_format=\"channels_last\"`:\n A 4D tensor with shape: `(batch_size, height, width, channels)`\n - If `data_format=\"channels_first\"`:\n A 4D tensor with shape: `(batch_size, channels, height, width)`\n\n Output shape:\n - If `data_format=\"channels_last\"`:\n A 4D tensor with shape: `(batch_size, new_height, new_width, filters)`\n - If `data_format=\"channels_first\"`:\n A 4D tensor with shape: `(batch_size, filters, new_height, new_width)`\n\n Returns:\n A 4D tensor representing\n `activation(conv2d_transpose(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: when both `strides > 1` and `dilation_rate > 1`.\n\n References:\n - [A guide to convolution arithmetic for deep learning](\n https://arxiv.org/abs/1603.07285v1)\n - [Deconvolutional Networks](\n https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)\n\n Examples:\n\n >>> x = np.random.rand(4, 10, 8, 128)\n >>> y = keras.layers.Conv2DTranspose(32, 2, 2, activation='relu')(x)\n >>> print(y.shape)\n (4, 20, 16, 32)\n \"\"\"\n\n def __init__(\n self,\n filters,\n kernel_size,\n strides=(1, 1),\n padding=\"valid\",\n data_format=None,\n dilation_rate=(1, 1),\n activation=None,\n use_bias=True,\n kernel_initializer=\"glorot_uniform\",\n bias_initializer=\"zeros\",\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs\n ):\n super().__init__(\n rank=2,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n kernel_constraint=kernel_constraint,\n bias_constraint=bias_constraint,\n **kwargs\n )\n", "path": "keras/layers/convolutional/conv2d_transpose.py"}], "after_files": [{"content": "from keras.api_export import keras_export\nfrom keras.layers.convolutional.base_conv_transpose import BaseConvTranspose\n\n\n@keras_export(\n [\n \"keras.layers.Conv2DTranspose\",\n \"keras.layers.Convolution2DTranspose\",\n ]\n)\nclass Conv2DTranspose(BaseConvTranspose):\n \"\"\"2D transposed convolution layer.\n\n The need for transposed convolutions generally arise from the desire to use\n a transformation going in the opposite direction of a normal convolution,\n i.e., from something that has the shape of the output of some convolution\n to something that has the shape of its input while maintaining a\n connectivity pattern that is compatible with said convolution.\n\n Args:\n filters: int, the dimension of the 
output space (the number of filters\n in the transposed convolution).\n kernel_size: int or tuple/list of 1 integer, specifying the size of the\n transposed convolution window.\n strides: int or tuple/list of 1 integer, specifying the stride length\n of the transposed convolution. `strides > 1` is incompatible with\n `dilation_rate > 1`.\n padding: string, either `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input. When `padding=\"same\"` and\n `strides=1`, the output has the same size as the input.\n data_format: string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape\n `(batch_size, height, width, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch_size, channels, height, width)`. It defaults to the\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json`. If you never set it, then it will be\n `\"channels_last\"`.\n dilation_rate: int or tuple/list of 1 integers, specifying the dilation\n rate to use for dilated transposed convolution.\n activation: Activation function. If `None`, no activation is applied.\n use_bias: bool, if `True`, bias will be added to the output.\n kernel_initializer: Initializer for the convolution kernel. If `None`,\n the default initializer (`\"glorot_uniform\"`) will be used.\n bias_initializer: Initializer for the bias vector. If `None`, the\n default initializer (`\"zeros\"`) will be used.\n kernel_regularizer: Optional regularizer for the convolution kernel.\n bias_regularizer: Optional regularizer for the bias vector.\n activity_regularizer: Optional regularizer function for the output.\n kernel_constraint: Optional projection function to be applied to the\n kernel after being updated by an `Optimizer` (e.g. used to implement\n norm constraints or value constraints for layer weights). The\n function must take as input the unprojected variable and must return\n the projected variable (which must have the same shape). 
Constraints\n are not safe to use when doing asynchronous distributed training.\n bias_constraint: Optional projection function to be applied to the\n bias after being updated by an `Optimizer`.\n\n Input shape:\n - If `data_format=\"channels_last\"`:\n A 4D tensor with shape: `(batch_size, height, width, channels)`\n - If `data_format=\"channels_first\"`:\n A 4D tensor with shape: `(batch_size, channels, height, width)`\n\n Output shape:\n - If `data_format=\"channels_last\"`:\n A 4D tensor with shape: `(batch_size, new_height, new_width, filters)`\n - If `data_format=\"channels_first\"`:\n A 4D tensor with shape: `(batch_size, filters, new_height, new_width)`\n\n Returns:\n A 4D tensor representing\n `activation(conv2d_transpose(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: when both `strides > 1` and `dilation_rate > 1`.\n\n References:\n - [A guide to convolution arithmetic for deep learning](\n https://arxiv.org/abs/1603.07285v1)\n - [Deconvolutional Networks](\n https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)\n\n Examples:\n\n >>> x = np.random.rand(4, 10, 8, 128)\n >>> y = keras.layers.Conv2DTranspose(32, 2, 2, activation='relu')(x)\n >>> print(y.shape)\n (4, 20, 16, 32)\n \"\"\"\n\n def __init__(\n self,\n filters,\n kernel_size,\n strides=(1, 1),\n padding=\"valid\",\n data_format=None,\n dilation_rate=(1, 1),\n activation=None,\n use_bias=True,\n kernel_initializer=\"glorot_uniform\",\n bias_initializer=\"zeros\",\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs\n ):\n super().__init__(\n rank=2,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n kernel_constraint=kernel_constraint,\n bias_constraint=bias_constraint,\n **kwargs\n )\n", "path": "keras/layers/convolutional/conv2d_transpose.py"}]}
1,905
195
gh_patches_debug_2419
rasdani/github-patches
git_diff
e-valuation__EvaP-1321
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Evaluation preview button visibility As a teaching assistant, I might be a contributor to a given course and therefore get my own feedback in the main evaluation. If that course also has an exam evaluation, I see that listed on my "own evaluations" page with the option to preview the questionnaire. However, as not being responsible, I miss the access rights to preview the linked page, resulting in an error. I would like to either don't have the preview button (it already knows while rendering that page that I am not a contributor, shown through the corresponding icon next to the exam evaluation title) or to give me the rights to preview the questionnaire. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `evap/evaluation/templatetags/evaluation_filters.py` Content: ``` 1 from django.forms import TypedChoiceField 2 from django.template import Library 3 4 from evap.evaluation.models import BASE_UNIPOLAR_CHOICES 5 from evap.evaluation.tools import STATES_ORDERED, STATE_DESCRIPTIONS 6 from evap.rewards.tools import can_reward_points_be_used_by 7 from evap.student.forms import HeadingField 8 9 10 register = Library() 11 12 13 @register.filter(name='zip') 14 def _zip(a, b): 15 return zip(a, b) 16 17 18 @register.filter 19 def ordering_index(evaluation): 20 if evaluation.state in ['new', 'prepared', 'editor_approved', 'approved']: 21 return evaluation.days_until_evaluation 22 elif evaluation.state == "in_evaluation": 23 return 100000 + evaluation.days_left_for_evaluation 24 return 200000 + evaluation.days_left_for_evaluation 25 26 27 # from http://www.jongales.com/blog/2009/10/19/percentage-django-template-tag/ 28 @register.filter 29 def percentage(fraction, population): 30 try: 31 return "{0:.0f}%".format(int(float(fraction) / float(population) * 100)) 32 except ValueError: 33 return None 34 except ZeroDivisionError: 35 return None 36 37 38 @register.filter 39 def percentage_one_decimal(fraction, population): 40 try: 41 return "{0:.1f}%".format((float(fraction) / float(population)) * 100) 42 except ValueError: 43 return None 44 except ZeroDivisionError: 45 return None 46 47 48 @register.filter 49 def percentage_value(fraction, population): 50 try: 51 return "{0:0f}".format((float(fraction) / float(population)) * 100) 52 except ValueError: 53 return None 54 except ZeroDivisionError: 55 return None 56 57 58 @register.filter 59 def to_colors(choices): 60 if not choices: 61 # When displaying the course distribution, there are no associated voting choices. 62 # In that case, we just use the colors of a unipolar scale. 
63 return BASE_UNIPOLAR_CHOICES['colors'] 64 return choices.colors 65 66 67 @register.filter 68 def statename(state): 69 return STATES_ORDERED.get(state) 70 71 72 @register.filter 73 def statedescription(state): 74 return STATE_DESCRIPTIONS.get(state) 75 76 77 @register.filter 78 def can_results_page_be_seen_by(evaluation, user): 79 return evaluation.can_results_page_be_seen_by(user) 80 81 82 @register.filter(name='can_reward_points_be_used_by') 83 def _can_reward_points_be_used_by(user): 84 return can_reward_points_be_used_by(user) 85 86 87 @register.filter 88 def is_choice_field(field): 89 return isinstance(field.field, TypedChoiceField) 90 91 92 @register.filter 93 def is_heading_field(field): 94 return isinstance(field.field, HeadingField) 95 96 97 @register.filter 98 def is_user_editor_or_delegate(evaluation, user): 99 return evaluation.is_user_editor_or_delegate(user) 100 101 102 @register.filter 103 def message_class(level): 104 return { 105 'debug': 'info', 106 'info': 'info', 107 'success': 'success', 108 'warning': 'warning', 109 'error': 'danger', 110 }.get(level, 'info') 111 112 113 @register.filter 114 def hours_and_minutes(time_left_for_evaluation): 115 hours = time_left_for_evaluation.seconds // 3600 116 minutes = (time_left_for_evaluation.seconds // 60) % 60 117 return "{:02}:{:02}".format(hours, minutes) 118 119 120 @register.filter 121 def has_nonresponsible_editor(evaluation): 122 return evaluation.contributions.filter(can_edit=True).exclude(contributor__in=evaluation.course.responsibles.all()).exists() 123 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/evap/evaluation/templatetags/evaluation_filters.py b/evap/evaluation/templatetags/evaluation_filters.py --- a/evap/evaluation/templatetags/evaluation_filters.py +++ b/evap/evaluation/templatetags/evaluation_filters.py @@ -99,6 +99,10 @@ return evaluation.is_user_editor_or_delegate(user) +@register.filter +def is_user_responsible_or_contributor_or_delegate(evaluation, user): + return evaluation.is_user_responsible_or_contributor_or_delegate(user) + @register.filter def message_class(level): return {
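The new filter is a thin delegation to a model method, so its behavior can be sketched without a configured Django app. `EvaluationStub` below is a hypothetical stand-in for evap's `Evaluation` model, whose real method queries the ORM.

```python
class EvaluationStub:
    """Hypothetical stand-in for evap's Evaluation model."""

    def __init__(self, privileged_users):
        self._privileged = set(privileged_users)

    def is_user_responsible_or_contributor_or_delegate(self, user):
        return user in self._privileged


def is_user_responsible_or_contributor_or_delegate(evaluation, user):
    # Same body as the template filter added in the diff above.
    return evaluation.is_user_responsible_or_contributor_or_delegate(user)


exam = EvaluationStub(privileged_users={"responsible", "delegate"})
assert is_user_responsible_or_contributor_or_delegate(exam, "responsible")
assert not is_user_responsible_or_contributor_or_delegate(exam, "teaching_assistant")
```

A template can then gate the preview button on this filter instead of rendering it for every listed contributor, which resolves the access-rights error described in the issue.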
{"golden_diff": "diff --git a/evap/evaluation/templatetags/evaluation_filters.py b/evap/evaluation/templatetags/evaluation_filters.py\n--- a/evap/evaluation/templatetags/evaluation_filters.py\n+++ b/evap/evaluation/templatetags/evaluation_filters.py\n@@ -99,6 +99,10 @@\n return evaluation.is_user_editor_or_delegate(user)\n \n \n+@register.filter\n+def is_user_responsible_or_contributor_or_delegate(evaluation, user):\n+ return evaluation.is_user_responsible_or_contributor_or_delegate(user)\n+\n @register.filter\n def message_class(level):\n return {\n", "issue": "Evaluation preview button visibility\nAs a teaching assistant, I might be a contributor to a given course and therefore get my own feedback in the main evaluation. If that course also has an exam evaluation, I see that listed on my \"own evaluations\" page with the option to preview the questionnaire. However, as not being responsible, I miss the access rights to preview the linked page, resulting in an error.\r\n\r\nI would like to either don't have the preview button (it already knows while rendering that page that I am not a contributor, shown through the corresponding icon next to the exam evaluation title) or to give me the rights to preview the questionnaire.\n", "before_files": [{"content": "from django.forms import TypedChoiceField\nfrom django.template import Library\n\nfrom evap.evaluation.models import BASE_UNIPOLAR_CHOICES\nfrom evap.evaluation.tools import STATES_ORDERED, STATE_DESCRIPTIONS\nfrom evap.rewards.tools import can_reward_points_be_used_by\nfrom evap.student.forms import HeadingField\n\n\nregister = Library()\n\n\n@register.filter(name='zip')\ndef _zip(a, b):\n return zip(a, b)\n\n\n@register.filter\ndef ordering_index(evaluation):\n if evaluation.state in ['new', 'prepared', 'editor_approved', 'approved']:\n return evaluation.days_until_evaluation\n elif evaluation.state == \"in_evaluation\":\n return 100000 + evaluation.days_left_for_evaluation\n return 200000 + evaluation.days_left_for_evaluation\n\n\n# from http://www.jongales.com/blog/2009/10/19/percentage-django-template-tag/\n@register.filter\ndef percentage(fraction, population):\n try:\n return \"{0:.0f}%\".format(int(float(fraction) / float(population) * 100))\n except ValueError:\n return None\n except ZeroDivisionError:\n return None\n\n\n@register.filter\ndef percentage_one_decimal(fraction, population):\n try:\n return \"{0:.1f}%\".format((float(fraction) / float(population)) * 100)\n except ValueError:\n return None\n except ZeroDivisionError:\n return None\n\n\n@register.filter\ndef percentage_value(fraction, population):\n try:\n return \"{0:0f}\".format((float(fraction) / float(population)) * 100)\n except ValueError:\n return None\n except ZeroDivisionError:\n return None\n\n\n@register.filter\ndef to_colors(choices):\n if not choices:\n # When displaying the course distribution, there are no associated voting choices.\n # In that case, we just use the colors of a unipolar scale.\n return BASE_UNIPOLAR_CHOICES['colors']\n return choices.colors\n\n\n@register.filter\ndef statename(state):\n return STATES_ORDERED.get(state)\n\n\n@register.filter\ndef statedescription(state):\n return STATE_DESCRIPTIONS.get(state)\n\n\n@register.filter\ndef can_results_page_be_seen_by(evaluation, user):\n return evaluation.can_results_page_be_seen_by(user)\n\n\n@register.filter(name='can_reward_points_be_used_by')\ndef _can_reward_points_be_used_by(user):\n return can_reward_points_be_used_by(user)\n\n\n@register.filter\ndef is_choice_field(field):\n return 
isinstance(field.field, TypedChoiceField)\n\n\n@register.filter\ndef is_heading_field(field):\n return isinstance(field.field, HeadingField)\n\n\n@register.filter\ndef is_user_editor_or_delegate(evaluation, user):\n return evaluation.is_user_editor_or_delegate(user)\n\n\n@register.filter\ndef message_class(level):\n return {\n 'debug': 'info',\n 'info': 'info',\n 'success': 'success',\n 'warning': 'warning',\n 'error': 'danger',\n }.get(level, 'info')\n\n\n@register.filter\ndef hours_and_minutes(time_left_for_evaluation):\n hours = time_left_for_evaluation.seconds // 3600\n minutes = (time_left_for_evaluation.seconds // 60) % 60\n return \"{:02}:{:02}\".format(hours, minutes)\n\n\n@register.filter\ndef has_nonresponsible_editor(evaluation):\n return evaluation.contributions.filter(can_edit=True).exclude(contributor__in=evaluation.course.responsibles.all()).exists()\n", "path": "evap/evaluation/templatetags/evaluation_filters.py"}], "after_files": [{"content": "from django.forms import TypedChoiceField\nfrom django.template import Library\n\nfrom evap.evaluation.models import BASE_UNIPOLAR_CHOICES\nfrom evap.evaluation.tools import STATES_ORDERED, STATE_DESCRIPTIONS\nfrom evap.rewards.tools import can_reward_points_be_used_by\nfrom evap.student.forms import HeadingField\n\n\nregister = Library()\n\n\n@register.filter(name='zip')\ndef _zip(a, b):\n return zip(a, b)\n\n\n@register.filter\ndef ordering_index(evaluation):\n if evaluation.state in ['new', 'prepared', 'editor_approved', 'approved']:\n return evaluation.days_until_evaluation\n elif evaluation.state == \"in_evaluation\":\n return 100000 + evaluation.days_left_for_evaluation\n return 200000 + evaluation.days_left_for_evaluation\n\n\n# from http://www.jongales.com/blog/2009/10/19/percentage-django-template-tag/\n@register.filter\ndef percentage(fraction, population):\n try:\n return \"{0:.0f}%\".format(int(float(fraction) / float(population) * 100))\n except ValueError:\n return None\n except ZeroDivisionError:\n return None\n\n\n@register.filter\ndef percentage_one_decimal(fraction, population):\n try:\n return \"{0:.1f}%\".format((float(fraction) / float(population)) * 100)\n except ValueError:\n return None\n except ZeroDivisionError:\n return None\n\n\n@register.filter\ndef percentage_value(fraction, population):\n try:\n return \"{0:0f}\".format((float(fraction) / float(population)) * 100)\n except ValueError:\n return None\n except ZeroDivisionError:\n return None\n\n\n@register.filter\ndef to_colors(choices):\n if not choices:\n # When displaying the course distribution, there are no associated voting choices.\n # In that case, we just use the colors of a unipolar scale.\n return BASE_UNIPOLAR_CHOICES['colors']\n return choices.colors\n\n\n@register.filter\ndef statename(state):\n return STATES_ORDERED.get(state)\n\n\n@register.filter\ndef statedescription(state):\n return STATE_DESCRIPTIONS.get(state)\n\n\n@register.filter\ndef can_results_page_be_seen_by(evaluation, user):\n return evaluation.can_results_page_be_seen_by(user)\n\n\n@register.filter(name='can_reward_points_be_used_by')\ndef _can_reward_points_be_used_by(user):\n return can_reward_points_be_used_by(user)\n\n\n@register.filter\ndef is_choice_field(field):\n return isinstance(field.field, TypedChoiceField)\n\n\n@register.filter\ndef is_heading_field(field):\n return isinstance(field.field, HeadingField)\n\n\n@register.filter\ndef is_user_editor_or_delegate(evaluation, user):\n return evaluation.is_user_editor_or_delegate(user)\n\n\n@register.filter\ndef 
is_user_responsible_or_contributor_or_delegate(evaluation, user):\n return evaluation.is_user_responsible_or_contributor_or_delegate(user)\n\n@register.filter\ndef message_class(level):\n return {\n 'debug': 'info',\n 'info': 'info',\n 'success': 'success',\n 'warning': 'warning',\n 'error': 'danger',\n }.get(level, 'info')\n\n\n@register.filter\ndef hours_and_minutes(time_left_for_evaluation):\n hours = time_left_for_evaluation.seconds // 3600\n minutes = (time_left_for_evaluation.seconds // 60) % 60\n return \"{:02}:{:02}\".format(hours, minutes)\n\n\n@register.filter\ndef has_nonresponsible_editor(evaluation):\n return evaluation.contributions.filter(can_edit=True).exclude(contributor__in=evaluation.course.responsibles.all()).exists()\n", "path": "evap/evaluation/templatetags/evaluation_filters.py"}]}
1,441
141
gh_patches_debug_2890
rasdani/github-patches
git_diff
pyg-team__pytorch_geometric-8179
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Dataset is not undirected ### 🐛 Describe the bug Dataset is not undirected, despite passing ``to_undirected=True`` flag. ```python # !pip install pyg-nightly from torch_geometric.datasets import CitationFull from torch_geometric.utils import is_undirected edge_index = CitationFull(root=".", name="Cora_ML", to_undirected=True).edge_index is_undirected(edge_index) ``` The above outputs: *False* ### Environment * PyG version: 2.4.0.dev20231010 * PyTorch version: 2.0.1+cu118 * OS: Colab * Python version: 3.10.12 * CUDA/cuDNN version: 11.8 * How you installed PyTorch and PyG (`conda`, `pip`, source): pip * Any other relevant information (*e.g.*, version of `torch-scatter`): --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torch_geometric/datasets/citation_full.py` Content: ``` 1 import os.path as osp 2 from typing import Callable, Optional 3 4 import torch 5 6 from torch_geometric.data import InMemoryDataset, download_url 7 from torch_geometric.io import read_npz 8 9 10 class CitationFull(InMemoryDataset): 11 r"""The full citation network datasets from the 12 `"Deep Gaussian Embedding of Graphs: Unsupervised Inductive Learning via 13 Ranking" <https://arxiv.org/abs/1707.03815>`_ paper. 14 Nodes represent documents and edges represent citation links. 15 Datasets include :obj:`"Cora"`, :obj:`"Cora_ML"`, :obj:`"CiteSeer"`, 16 :obj:`"DBLP"`, :obj:`"PubMed"`. 17 18 Args: 19 root (str): Root directory where the dataset should be saved. 20 name (str): The name of the dataset (:obj:`"Cora"`, :obj:`"Cora_ML"` 21 :obj:`"CiteSeer"`, :obj:`"DBLP"`, :obj:`"PubMed"`). 22 transform (callable, optional): A function/transform that takes in an 23 :obj:`torch_geometric.data.Data` object and returns a transformed 24 version. The data object will be transformed before every access. 25 (default: :obj:`None`) 26 pre_transform (callable, optional): A function/transform that takes in 27 an :obj:`torch_geometric.data.Data` object and returns a 28 transformed version. The data object will be transformed before 29 being saved to disk. (default: :obj:`None`) 30 to_undirected (bool, optional): Whether the original graph is 31 converted to an undirected one. (default: :obj:`True`) 32 33 **STATS:** 34 35 .. 
list-table:: 36 :widths: 10 10 10 10 10 37 :header-rows: 1 38 39 * - Name 40 - #nodes 41 - #edges 42 - #features 43 - #classes 44 * - Cora 45 - 19,793 46 - 126,842 47 - 8,710 48 - 70 49 * - Cora_ML 50 - 2,995 51 - 16,316 52 - 2,879 53 - 7 54 * - CiteSeer 55 - 4,230 56 - 10,674 57 - 602 58 - 6 59 * - DBLP 60 - 17,716 61 - 105,734 62 - 1,639 63 - 4 64 * - PubMed 65 - 19,717 66 - 88,648 67 - 500 68 - 3 69 """ 70 71 url = 'https://github.com/abojchevski/graph2gauss/raw/master/data/{}.npz' 72 73 def __init__( 74 self, 75 root: str, 76 name: str, 77 transform: Optional[Callable] = None, 78 pre_transform: Optional[Callable] = None, 79 to_undirected: bool = True, 80 ): 81 self.name = name.lower() 82 self.to_undirected = to_undirected 83 assert self.name in ['cora', 'cora_ml', 'citeseer', 'dblp', 'pubmed'] 84 super().__init__(root, transform, pre_transform) 85 self.data, self.slices = torch.load(self.processed_paths[0]) 86 87 @property 88 def raw_dir(self) -> str: 89 return osp.join(self.root, self.name, 'raw') 90 91 @property 92 def processed_dir(self) -> str: 93 return osp.join(self.root, self.name, 'processed') 94 95 @property 96 def raw_file_names(self) -> str: 97 return f'{self.name}.npz' 98 99 @property 100 def processed_file_names(self) -> str: 101 return 'data.pt' 102 103 def download(self): 104 download_url(self.url.format(self.name), self.raw_dir) 105 106 def process(self): 107 data = read_npz(self.raw_paths[0], to_undirected=self.to_undirected) 108 data = data if self.pre_transform is None else self.pre_transform(data) 109 data, slices = self.collate([data]) 110 torch.save((data, slices), self.processed_paths[0]) 111 112 def __repr__(self) -> str: 113 return f'{self.name.capitalize()}Full()' 114 115 116 class CoraFull(CitationFull): 117 r"""Alias for :class:`~torch_geometric.datasets.CitationFull` with 118 :obj:`name="Cora"`. 119 120 **STATS:** 121 122 .. list-table:: 123 :widths: 10 10 10 10 124 :header-rows: 1 125 126 * - #nodes 127 - #edges 128 - #features 129 - #classes 130 * - 19,793 131 - 126,842 132 - 8,710 133 - 70 134 """ 135 def __init__(self, root: str, transform: Optional[Callable] = None, 136 pre_transform: Optional[Callable] = None): 137 super().__init__(root, 'cora', transform, pre_transform) 138 139 def download(self): 140 super().download() 141 142 def process(self): 143 super().process() 144 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torch_geometric/datasets/citation_full.py b/torch_geometric/datasets/citation_full.py --- a/torch_geometric/datasets/citation_full.py +++ b/torch_geometric/datasets/citation_full.py @@ -98,7 +98,8 @@ @property def processed_file_names(self) -> str: - return 'data.pt' + suffix = 'undirected' if self.to_undirected else 'directed' + return f'data_{suffix}.pt' def download(self): download_url(self.url.format(self.name), self.raw_dir)
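The root cause is the on-disk cache: `InMemoryDataset` reuses whatever processed file already exists, so a directed `data.pt` written first was silently served to `to_undirected=True` as well. A rough reproduction of the fixed behavior, assuming a patched `torch_geometric` install and network access for the initial download:

```python
from torch_geometric.datasets import CitationFull
from torch_geometric.utils import is_undirected

# With the suffix, each setting processes into its own cache file
# (data_directed.pt vs. data_undirected.pt) under the same root.
directed = CitationFull(root="/tmp/cora_ml", name="Cora_ML", to_undirected=False)
undirected = CitationFull(root="/tmp/cora_ml", name="Cora_ML", to_undirected=True)

assert not is_undirected(directed.edge_index)  # raw citation graph is directed
assert is_undirected(undirected.edge_index)    # was False when data.pt got reused
```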
{"golden_diff": "diff --git a/torch_geometric/datasets/citation_full.py b/torch_geometric/datasets/citation_full.py\n--- a/torch_geometric/datasets/citation_full.py\n+++ b/torch_geometric/datasets/citation_full.py\n@@ -98,7 +98,8 @@\n \n @property\n def processed_file_names(self) -> str:\n- return 'data.pt'\n+ suffix = 'undirected' if self.to_undirected else 'directed'\n+ return f'data_{suffix}.pt'\n \n def download(self):\n download_url(self.url.format(self.name), self.raw_dir)\n", "issue": "Dataset is not undirected\n### \ud83d\udc1b Describe the bug\n\nDataset is not undirected, despite passing ``to_undirected=True`` flag.\r\n\r\n```python\r\n# !pip install pyg-nightly\r\n\r\nfrom torch_geometric.datasets import CitationFull\r\nfrom torch_geometric.utils import is_undirected\r\n\r\nedge_index = CitationFull(root=\".\", name=\"Cora_ML\", to_undirected=True).edge_index\r\nis_undirected(edge_index)\r\n```\r\nThe above outputs: *False*\n\n### Environment\n\n* PyG version: 2.4.0.dev20231010\r\n* PyTorch version: 2.0.1+cu118\r\n* OS: Colab\r\n* Python version: 3.10.12\r\n* CUDA/cuDNN version: 11.8\r\n* How you installed PyTorch and PyG (`conda`, `pip`, source): pip\r\n* Any other relevant information (*e.g.*, version of `torch-scatter`):\r\n\n", "before_files": [{"content": "import os.path as osp\nfrom typing import Callable, Optional\n\nimport torch\n\nfrom torch_geometric.data import InMemoryDataset, download_url\nfrom torch_geometric.io import read_npz\n\n\nclass CitationFull(InMemoryDataset):\n r\"\"\"The full citation network datasets from the\n `\"Deep Gaussian Embedding of Graphs: Unsupervised Inductive Learning via\n Ranking\" <https://arxiv.org/abs/1707.03815>`_ paper.\n Nodes represent documents and edges represent citation links.\n Datasets include :obj:`\"Cora\"`, :obj:`\"Cora_ML\"`, :obj:`\"CiteSeer\"`,\n :obj:`\"DBLP\"`, :obj:`\"PubMed\"`.\n\n Args:\n root (str): Root directory where the dataset should be saved.\n name (str): The name of the dataset (:obj:`\"Cora\"`, :obj:`\"Cora_ML\"`\n :obj:`\"CiteSeer\"`, :obj:`\"DBLP\"`, :obj:`\"PubMed\"`).\n transform (callable, optional): A function/transform that takes in an\n :obj:`torch_geometric.data.Data` object and returns a transformed\n version. The data object will be transformed before every access.\n (default: :obj:`None`)\n pre_transform (callable, optional): A function/transform that takes in\n an :obj:`torch_geometric.data.Data` object and returns a\n transformed version. The data object will be transformed before\n being saved to disk. (default: :obj:`None`)\n to_undirected (bool, optional): Whether the original graph is\n converted to an undirected one. (default: :obj:`True`)\n\n **STATS:**\n\n .. 
list-table::\n :widths: 10 10 10 10 10\n :header-rows: 1\n\n * - Name\n - #nodes\n - #edges\n - #features\n - #classes\n * - Cora\n - 19,793\n - 126,842\n - 8,710\n - 70\n * - Cora_ML\n - 2,995\n - 16,316\n - 2,879\n - 7\n * - CiteSeer\n - 4,230\n - 10,674\n - 602\n - 6\n * - DBLP\n - 17,716\n - 105,734\n - 1,639\n - 4\n * - PubMed\n - 19,717\n - 88,648\n - 500\n - 3\n \"\"\"\n\n url = 'https://github.com/abojchevski/graph2gauss/raw/master/data/{}.npz'\n\n def __init__(\n self,\n root: str,\n name: str,\n transform: Optional[Callable] = None,\n pre_transform: Optional[Callable] = None,\n to_undirected: bool = True,\n ):\n self.name = name.lower()\n self.to_undirected = to_undirected\n assert self.name in ['cora', 'cora_ml', 'citeseer', 'dblp', 'pubmed']\n super().__init__(root, transform, pre_transform)\n self.data, self.slices = torch.load(self.processed_paths[0])\n\n @property\n def raw_dir(self) -> str:\n return osp.join(self.root, self.name, 'raw')\n\n @property\n def processed_dir(self) -> str:\n return osp.join(self.root, self.name, 'processed')\n\n @property\n def raw_file_names(self) -> str:\n return f'{self.name}.npz'\n\n @property\n def processed_file_names(self) -> str:\n return 'data.pt'\n\n def download(self):\n download_url(self.url.format(self.name), self.raw_dir)\n\n def process(self):\n data = read_npz(self.raw_paths[0], to_undirected=self.to_undirected)\n data = data if self.pre_transform is None else self.pre_transform(data)\n data, slices = self.collate([data])\n torch.save((data, slices), self.processed_paths[0])\n\n def __repr__(self) -> str:\n return f'{self.name.capitalize()}Full()'\n\n\nclass CoraFull(CitationFull):\n r\"\"\"Alias for :class:`~torch_geometric.datasets.CitationFull` with\n :obj:`name=\"Cora\"`.\n\n **STATS:**\n\n .. list-table::\n :widths: 10 10 10 10\n :header-rows: 1\n\n * - #nodes\n - #edges\n - #features\n - #classes\n * - 19,793\n - 126,842\n - 8,710\n - 70\n \"\"\"\n def __init__(self, root: str, transform: Optional[Callable] = None,\n pre_transform: Optional[Callable] = None):\n super().__init__(root, 'cora', transform, pre_transform)\n\n def download(self):\n super().download()\n\n def process(self):\n super().process()\n", "path": "torch_geometric/datasets/citation_full.py"}], "after_files": [{"content": "import os.path as osp\nfrom typing import Callable, Optional\n\nimport torch\n\nfrom torch_geometric.data import InMemoryDataset, download_url\nfrom torch_geometric.io import read_npz\n\n\nclass CitationFull(InMemoryDataset):\n r\"\"\"The full citation network datasets from the\n `\"Deep Gaussian Embedding of Graphs: Unsupervised Inductive Learning via\n Ranking\" <https://arxiv.org/abs/1707.03815>`_ paper.\n Nodes represent documents and edges represent citation links.\n Datasets include :obj:`\"Cora\"`, :obj:`\"Cora_ML\"`, :obj:`\"CiteSeer\"`,\n :obj:`\"DBLP\"`, :obj:`\"PubMed\"`.\n\n Args:\n root (str): Root directory where the dataset should be saved.\n name (str): The name of the dataset (:obj:`\"Cora\"`, :obj:`\"Cora_ML\"`\n :obj:`\"CiteSeer\"`, :obj:`\"DBLP\"`, :obj:`\"PubMed\"`).\n transform (callable, optional): A function/transform that takes in an\n :obj:`torch_geometric.data.Data` object and returns a transformed\n version. The data object will be transformed before every access.\n (default: :obj:`None`)\n pre_transform (callable, optional): A function/transform that takes in\n an :obj:`torch_geometric.data.Data` object and returns a\n transformed version. The data object will be transformed before\n being saved to disk. 
(default: :obj:`None`)\n to_undirected (bool, optional): Whether the original graph is\n converted to an undirected one. (default: :obj:`True`)\n\n **STATS:**\n\n .. list-table::\n :widths: 10 10 10 10 10\n :header-rows: 1\n\n * - Name\n - #nodes\n - #edges\n - #features\n - #classes\n * - Cora\n - 19,793\n - 126,842\n - 8,710\n - 70\n * - Cora_ML\n - 2,995\n - 16,316\n - 2,879\n - 7\n * - CiteSeer\n - 4,230\n - 10,674\n - 602\n - 6\n * - DBLP\n - 17,716\n - 105,734\n - 1,639\n - 4\n * - PubMed\n - 19,717\n - 88,648\n - 500\n - 3\n \"\"\"\n\n url = 'https://github.com/abojchevski/graph2gauss/raw/master/data/{}.npz'\n\n def __init__(\n self,\n root: str,\n name: str,\n transform: Optional[Callable] = None,\n pre_transform: Optional[Callable] = None,\n to_undirected: bool = True,\n ):\n self.name = name.lower()\n self.to_undirected = to_undirected\n assert self.name in ['cora', 'cora_ml', 'citeseer', 'dblp', 'pubmed']\n super().__init__(root, transform, pre_transform)\n self.data, self.slices = torch.load(self.processed_paths[0])\n\n @property\n def raw_dir(self) -> str:\n return osp.join(self.root, self.name, 'raw')\n\n @property\n def processed_dir(self) -> str:\n return osp.join(self.root, self.name, 'processed')\n\n @property\n def raw_file_names(self) -> str:\n return f'{self.name}.npz'\n\n @property\n def processed_file_names(self) -> str:\n suffix = 'undirected' if self.to_undirected else 'directed'\n return f'data_{suffix}.pt'\n\n def download(self):\n download_url(self.url.format(self.name), self.raw_dir)\n\n def process(self):\n data = read_npz(self.raw_paths[0], to_undirected=self.to_undirected)\n data = data if self.pre_transform is None else self.pre_transform(data)\n data, slices = self.collate([data])\n torch.save((data, slices), self.processed_paths[0])\n\n def __repr__(self) -> str:\n return f'{self.name.capitalize()}Full()'\n\n\nclass CoraFull(CitationFull):\n r\"\"\"Alias for :class:`~torch_geometric.datasets.CitationFull` with\n :obj:`name=\"Cora\"`.\n\n **STATS:**\n\n .. list-table::\n :widths: 10 10 10 10\n :header-rows: 1\n\n * - #nodes\n - #edges\n - #features\n - #classes\n * - 19,793\n - 126,842\n - 8,710\n - 70\n \"\"\"\n def __init__(self, root: str, transform: Optional[Callable] = None,\n pre_transform: Optional[Callable] = None):\n super().__init__(root, 'cora', transform, pre_transform)\n\n def download(self):\n super().download()\n\n def process(self):\n super().process()\n", "path": "torch_geometric/datasets/citation_full.py"}]}
2,025
135
gh_patches_debug_3654
rasdani/github-patches
git_diff
spack__spack-2961
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fail to build libmng [AutotoolsPackage] @alalazo I got this error while installing `libmng`: ``` ... ==> Already patched libmng ==> Building libmng [AutotoolsPackage] ==> Executing phase : 'autoreconf' ==> Executing phase : 'configure' ==> Error: ProcessError: Command exited with status 1: '/my/path/spack/var/spack/stage/libmng-2.0.2-2x5fkukzf3sf4uexegr3n35jwmy5pclu/libmng-2.0.2/configure' '--prefix=/my/path/spack/opt/spack/linux-scientificcernslc6-x86_64/gcc-6.2.0/libmng-2.0.2-2x5fkukzf3sf4uexegr3n35jwmy5pclu' /my/path/spack/lib/spack/spack/build_systems/autotools.py:265, in configure: 258 def configure(self, spec, prefix): 259 """Runs configure with the arguments specified in :py:meth:`.configure_args` 260 and an appropriately set prefix. 261 """ 262 options = ['--prefix={0}'.format(prefix)] + self.configure_args() 263 264 with working_dir(self.build_directory, create=True) ``` And this is the spack-build.out: ``` ... checking for a BSD-compatible install... /usr/bin/install -c checking whether build environment is sane... yes checking for a thread-safe mkdir -p... /bin/mkdir -p checking for gawk... gawk checking whether make sets $(MAKE)... yes checking whether make supports nested variables... yes configure: error: source directory already configured; run "make distclean" there first ``` Before merge #2859 I could install it correctly but now I get this, do you think that could be related with the commit or maybe I have to install it in a different way now? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `var/spack/repos/builtin/packages/libmng/package.py` Content: ``` 1 ############################################################################## 2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. 3 # Produced at the Lawrence Livermore National Laboratory. 4 # 5 # This file is part of Spack. 6 # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. 7 # LLNL-CODE-647188 8 # 9 # For details, see https://github.com/llnl/spack 10 # Please also see the LICENSE file for our notice and the LGPL. 11 # 12 # This program is free software; you can redistribute it and/or modify 13 # it under the terms of the GNU Lesser General Public License (as 14 # published by the Free Software Foundation) version 2.1, February 1999. 15 # 16 # This program is distributed in the hope that it will be useful, but 17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF 18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and 19 # conditions of the GNU Lesser General Public License for more details. 20 # 21 # You should have received a copy of the GNU Lesser General Public 22 # License along with this program; if not, write to the Free Software 23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 24 ############################################################################## 25 from spack import * 26 27 28 class Libmng(AutotoolsPackage): 29 """libmng -THE reference library for reading, displaying, writing 30 and examining Multiple-Image Network Graphics. 
MNG is the animation 31 extension to the popular PNG image-format.""" 32 homepage = "http://sourceforge.net/projects/libmng/" 33 url = "http://downloads.sourceforge.net/project/libmng/libmng-devel/2.0.2/libmng-2.0.2.tar.gz" 34 35 version('2.0.2', '1ffefaed4aac98475ee6267422cbca55') 36 37 depends_on("jpeg") 38 depends_on("zlib") 39 depends_on("lcms") 40 41 def patch(self): 42 # jpeg requires stdio to beincluded before its headrs. 43 filter_file(r'^(\#include \<jpeglib\.h\>)', 44 '#include<stdio.h>\n\\1', 'libmng_types.h') 45 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/var/spack/repos/builtin/packages/libmng/package.py b/var/spack/repos/builtin/packages/libmng/package.py --- a/var/spack/repos/builtin/packages/libmng/package.py +++ b/var/spack/repos/builtin/packages/libmng/package.py @@ -42,3 +42,7 @@ # jpeg requires stdio to beincluded before its headrs. filter_file(r'^(\#include \<jpeglib\.h\>)', '#include<stdio.h>\n\\1', 'libmng_types.h') + + @run_before('configure') + def clean_configure_directory(self): + make('distclean')
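The fix leans on Spack's phase-hook mechanism: `run_before('configure')` registers a method that runs ahead of the configure phase. A hypothetical package using the same pattern is sketched below; the bare names only resolve inside a Spack package repository, where `from spack import *` supplies `AutotoolsPackage`, `run_before`, and the `make` executable wrapper.

```python
from spack import *


class Example(AutotoolsPackage):
    """Illustrative package only, not part of the builtin repo."""

    homepage = "https://example.org"
    url = "https://example.org/example-1.0.tar.gz"

    version('1.0', '00000000000000000000000000000000')  # placeholder checksum

    @run_before('configure')
    def clean_configure_directory(self):
        # The libmng tarball ships pre-configured build state, which makes
        # configure abort with "source directory already configured";
        # running distclean resets the tree before Spack's configure phase.
        make('distclean')
```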
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/libmng/package.py b/var/spack/repos/builtin/packages/libmng/package.py\n--- a/var/spack/repos/builtin/packages/libmng/package.py\n+++ b/var/spack/repos/builtin/packages/libmng/package.py\n@@ -42,3 +42,7 @@\n # jpeg requires stdio to beincluded before its headrs.\n filter_file(r'^(\\#include \\<jpeglib\\.h\\>)',\n '#include<stdio.h>\\n\\\\1', 'libmng_types.h')\n+\n+ @run_before('configure')\n+ def clean_configure_directory(self):\n+ make('distclean')\n", "issue": "Fail to build libmng [AutotoolsPackage]\n@alalazo I got this error while installing `libmng`:\r\n\r\n```\r\n...\r\n==> Already patched libmng\r\n==> Building libmng [AutotoolsPackage]\r\n==> Executing phase : 'autoreconf'\r\n==> Executing phase : 'configure'\r\n==> Error: ProcessError: Command exited with status 1:\r\n '/my/path/spack/var/spack/stage/libmng-2.0.2-2x5fkukzf3sf4uexegr3n35jwmy5pclu/libmng-2.0.2/configure' '--prefix=/my/path/spack/opt/spack/linux-scientificcernslc6-x86_64/gcc-6.2.0/libmng-2.0.2-2x5fkukzf3sf4uexegr3n35jwmy5pclu'\r\n/my/path/spack/lib/spack/spack/build_systems/autotools.py:265, in configure:\r\n 258 def configure(self, spec, prefix):\r\n 259 \"\"\"Runs configure with the arguments specified in :py:meth:`.configure_args`\r\n 260 and an appropriately set prefix.\r\n 261 \"\"\"\r\n 262 options = ['--prefix={0}'.format(prefix)] + self.configure_args()\r\n 263 \r\n 264 with working_dir(self.build_directory, create=True)\r\n```\r\n\r\nAnd this is the spack-build.out:\r\n\r\n```\r\n...\r\nchecking for a BSD-compatible install... /usr/bin/install -c\r\nchecking whether build environment is sane... yes\r\nchecking for a thread-safe mkdir -p... /bin/mkdir -p\r\nchecking for gawk... gawk\r\nchecking whether make sets $(MAKE)... yes\r\nchecking whether make supports nested variables... yes\r\nconfigure: error: source directory already configured; run \"make distclean\" there first\r\n```\r\n\r\nBefore merge #2859 I could install it correctly but now I get this, do you think that could be related with the commit or maybe I have to install it in a different way now?\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Libmng(AutotoolsPackage):\n \"\"\"libmng -THE reference library for reading, displaying, writing\n and examining Multiple-Image Network Graphics. MNG is the animation\n extension to the popular PNG image-format.\"\"\"\n homepage = \"http://sourceforge.net/projects/libmng/\"\n url = \"http://downloads.sourceforge.net/project/libmng/libmng-devel/2.0.2/libmng-2.0.2.tar.gz\"\n\n version('2.0.2', '1ffefaed4aac98475ee6267422cbca55')\n\n depends_on(\"jpeg\")\n depends_on(\"zlib\")\n depends_on(\"lcms\")\n\n def patch(self):\n # jpeg requires stdio to beincluded before its headrs.\n filter_file(r'^(\\#include \\<jpeglib\\.h\\>)',\n '#include<stdio.h>\\n\\\\1', 'libmng_types.h')\n", "path": "var/spack/repos/builtin/packages/libmng/package.py"}], "after_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Libmng(AutotoolsPackage):\n \"\"\"libmng -THE reference library for reading, displaying, writing\n and examining Multiple-Image Network Graphics. MNG is the animation\n extension to the popular PNG image-format.\"\"\"\n homepage = \"http://sourceforge.net/projects/libmng/\"\n url = \"http://downloads.sourceforge.net/project/libmng/libmng-devel/2.0.2/libmng-2.0.2.tar.gz\"\n\n version('2.0.2', '1ffefaed4aac98475ee6267422cbca55')\n\n depends_on(\"jpeg\")\n depends_on(\"zlib\")\n depends_on(\"lcms\")\n\n def patch(self):\n # jpeg requires stdio to beincluded before its headrs.\n filter_file(r'^(\\#include \\<jpeglib\\.h\\>)',\n '#include<stdio.h>\\n\\\\1', 'libmng_types.h')\n\n @run_before('configure')\n def clean_configure_directory(self):\n make('distclean')\n", "path": "var/spack/repos/builtin/packages/libmng/package.py"}]}
1,312
143
gh_patches_debug_36111
rasdani/github-patches
git_diff
pantsbuild__pants-8673
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Deprecate using default `--fmt-transitive` and `--lint-transitive` in preparation for switching default to `--no-{fmt,lint}-transitive` ### Problem Will close https://github.com/pantsbuild/pants/issues/8345. These options cause tools like isort and Scalafmt to work on the transitive dependencies of the targets you specify, rather than only the targets specified. This is surprising and not how the tools work when called directly—you'd expect isort to only change the files you pass to it, for example. We decided when adding this option to the V2 rules (https://github.com/pantsbuild/pants/pull/8660) that instead we should deprecate this misfeature. However, we cannot simply deprecate the option in one fell-swoop because then people who are trying to prepare for the default behavior changing to `--no-transitive` will be met with a deprecation warning that the option will be removed. Leaving off the option so that there's no deprecation warning means that they will have a breaking behavior change in 1.25.0.dev2 when we no longer act transitively. ### Solution For this deprecation cycle, only warn that the default will change if they are currently relying on the default. In 1.25.0.dev2, after the default changes, _then_ we can safely deprecate the option outright. ### Result Users who specified the option get this warning message: > [WARN] /Users/eric/DocsLocal/code/projects/pants/src/python/pants/task/task.py:265: DeprecationWarning: DEPRECATED: Pants defaulting to --fmt-transitive and --lint-transitive will be removed in version 1.25.0.dev2. Pants will soon default to --no-fmt-transitive and --no-lint-transitive. Currently, Pants defaults to `--fmt-transitive` and `--lint-transitive`, which means that tools like isort and Scalafmt will work on transitive dependencies as well. This behavior is unexpected. Normally when running tools like isort, you'd expect them to only work on the files you specify. > > To prepare, please add to your `pants.ini` under both the `fmt` and the `lint` sections the option `transitive: False`. If you want to keep the default, use `True`, although the option will be removed in Pants 1.27.0.dev2 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/python/pants/task/target_restriction_mixins.py` Content: ``` 1 # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 from pants.base.deprecated import deprecated_conditional 5 from pants.task.goal_options_mixin import GoalOptionsMixin, GoalOptionsRegistrar 6 7 8 class HasTransitiveOptionMixin: 9 """A mixin for tasks that have a --transitive option. 10 11 Some tasks must always act on the entire dependency closure. E.g., when compiling, one must 12 compile all of a target's dependencies before compiling that target. 13 14 Other tasks must always act only on the target roots (the targets explicitly specified by the 15 user on the command line). E.g., when finding paths between two user-specified targets. 16 17 Still other tasks may optionally act on either the target roots or the entire closure, 18 as the user prefers in each case. E.g., when invoking a linter. This mixin supports such tasks. 19 20 Note that this mixin doesn't actually register the --transitive option. 
It assumes that this 21 option was registered on the task (either directly or recursively from its goal). 22 """ 23 24 @property 25 def act_transitively(self): 26 deprecated_conditional( 27 lambda: self.get_options().is_default("transitive"), 28 removal_version="1.25.0.dev2", 29 entity_description="Pants defaulting to `--fmt-transitive` and `--lint-transitive`", 30 hint_message="Pants will soon default to `--no-fmt-transitive` and `--no-lint-transitive`. " 31 "Currently, Pants defaults to `--fmt-transitive` and `--lint-transitive`, which " 32 "means that tools like isort and Scalafmt will work on transitive dependencies " 33 "as well. This behavior is unexpected. Normally when running tools like isort, " 34 "you'd expect them to only work on the files you specify.\n\nTo prepare, " 35 "please add to your `pants.ini` under both the `fmt` and the `lint` " 36 "sections the option `transitive: False`. If you want to keep the default, use " 37 "`True`, although we recommend setting to `False` as the `--transitive` option " 38 "will be removed in a future Pants version." 39 ) 40 return self.get_options().transitive 41 42 43 class TransitiveOptionRegistrar: 44 """Registrar of --transitive.""" 45 46 @classmethod 47 def register_options(cls, register): 48 super().register_options(register) 49 register('--transitive', type=bool, default=True, fingerprint=True, recursive=True, 50 help="If false, act only on the targets directly specified on the command line. " 51 "If true, act on the transitive dependency closure of those targets.") 52 53 54 class HasSkipOptionMixin: 55 """A mixin for tasks that have a --skip option. 56 57 Some tasks may be skipped during certain usages. E.g., you may not want to apply linters 58 while developing. This mixin supports such tasks. 59 60 Note that this mixin doesn't actually register the --skip option. It assumes that this 61 option was registered on the task (either directly or recursively from its goal). 62 """ 63 64 @property 65 def skip_execution(self): 66 return self.get_options().skip 67 68 69 class SkipOptionRegistrar: 70 """Registrar of --skip.""" 71 72 @classmethod 73 def register_options(cls, register): 74 super().register_options(register) 75 register('--skip', type=bool, default=False, fingerprint=True, recursive=True, 76 help='Skip task.') 77 78 79 class HasSkipAndTransitiveOptionsMixin(HasSkipOptionMixin, HasTransitiveOptionMixin): 80 """A mixin for tasks that have a --transitive and a --skip option.""" 81 pass 82 83 84 class HasSkipAndTransitiveGoalOptionsMixin(GoalOptionsMixin, HasSkipAndTransitiveOptionsMixin): 85 """A mixin for tasks that have a --transitive and a --skip option registered at the goal level.""" 86 pass 87 88 89 class SkipAndTransitiveOptionsRegistrar(SkipOptionRegistrar, TransitiveOptionRegistrar): 90 """Registrar of --skip and --transitive.""" 91 pass 92 93 94 class SkipAndTransitiveGoalOptionsRegistrar(SkipAndTransitiveOptionsRegistrar, 95 GoalOptionsRegistrar): 96 """Registrar of --skip and --transitive at the goal level.""" 97 pass 98 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/python/pants/task/target_restriction_mixins.py b/src/python/pants/task/target_restriction_mixins.py --- a/src/python/pants/task/target_restriction_mixins.py +++ b/src/python/pants/task/target_restriction_mixins.py @@ -23,20 +23,6 @@ @property def act_transitively(self): - deprecated_conditional( - lambda: self.get_options().is_default("transitive"), - removal_version="1.25.0.dev2", - entity_description="Pants defaulting to `--fmt-transitive` and `--lint-transitive`", - hint_message="Pants will soon default to `--no-fmt-transitive` and `--no-lint-transitive`. " - "Currently, Pants defaults to `--fmt-transitive` and `--lint-transitive`, which " - "means that tools like isort and Scalafmt will work on transitive dependencies " - "as well. This behavior is unexpected. Normally when running tools like isort, " - "you'd expect them to only work on the files you specify.\n\nTo prepare, " - "please add to your `pants.ini` under both the `fmt` and the `lint` " - "sections the option `transitive: False`. If you want to keep the default, use " - "`True`, although we recommend setting to `False` as the `--transitive` option " - "will be removed in a future Pants version." - ) return self.get_options().transitive @@ -83,7 +69,24 @@ class HasSkipAndTransitiveGoalOptionsMixin(GoalOptionsMixin, HasSkipAndTransitiveOptionsMixin): """A mixin for tasks that have a --transitive and a --skip option registered at the goal level.""" - pass + + @property + def act_transitively(self): + deprecated_conditional( + lambda: self.get_options().is_default("transitive"), + removal_version="1.25.0.dev2", + entity_description="Pants defaulting to `--fmt-transitive` and `--lint-transitive`", + hint_message="Pants will soon default to `--no-fmt-transitive` and `--no-lint-transitive`. " + "Currently, Pants defaults to `--fmt-transitive` and `--lint-transitive`, which " + "means that tools like isort and Scalafmt will work on transitive dependencies " + "as well. This behavior is unexpected. Normally when running tools like isort, " + "you'd expect them to only work on the files you specify.\n\nTo prepare, " + "please add to your `pants.ini` under both the `fmt` and the `lint` " + "sections the option `transitive: False`. If you want to keep the default, use " + "`True`, although we recommend setting to `False` as the `--transitive` option " + "will be removed in a future Pants version." + ) + return self.get_options().transitive class SkipAndTransitiveOptionsRegistrar(SkipOptionRegistrar, TransitiveOptionRegistrar):
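The diff above works by deleting the deprecation warning from the shared `HasTransitiveOptionMixin` (which compile-style tasks also inherit) and re-declaring `act_transitively` on `HasSkipAndTransitiveGoalOptionsMixin`, so only goal-level tasks such as `fmt` and `lint` warn while the user still relies on the default. A minimal, self-contained sketch of that "warn only on the default" pattern; the `Options` class and its methods are illustrative stand-ins, not Pants' real option system:

```python
import warnings


class Options:
    """Illustrative stand-in for Pants' option values (not the real API)."""

    def __init__(self, transitive=None, default=True):
        self._transitive = transitive  # None means "user did not set it"
        self._default = default

    def is_default(self, name):
        assert name == "transitive"
        return self._transitive is None

    @property
    def transitive(self):
        return self._default if self._transitive is None else self._transitive


def act_transitively(options):
    # Warn only while the caller relies on the default, so users who set
    # the option explicitly (either way) are not nagged about removal.
    if options.is_default("transitive"):
        warnings.warn(
            "Pants will soon default to --no-fmt-transitive and "
            "--no-lint-transitive; set `transitive` explicitly to prepare.",
            DeprecationWarning,
        )
    return options.transitive


act_transitively(Options())                  # warns, returns True
act_transitively(Options(transitive=False))  # silent, returns False
```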
{"golden_diff": "diff --git a/src/python/pants/task/target_restriction_mixins.py b/src/python/pants/task/target_restriction_mixins.py\n--- a/src/python/pants/task/target_restriction_mixins.py\n+++ b/src/python/pants/task/target_restriction_mixins.py\n@@ -23,20 +23,6 @@\n \n @property\n def act_transitively(self):\n- deprecated_conditional(\n- lambda: self.get_options().is_default(\"transitive\"),\n- removal_version=\"1.25.0.dev2\",\n- entity_description=\"Pants defaulting to `--fmt-transitive` and `--lint-transitive`\",\n- hint_message=\"Pants will soon default to `--no-fmt-transitive` and `--no-lint-transitive`. \"\n- \"Currently, Pants defaults to `--fmt-transitive` and `--lint-transitive`, which \"\n- \"means that tools like isort and Scalafmt will work on transitive dependencies \"\n- \"as well. This behavior is unexpected. Normally when running tools like isort, \"\n- \"you'd expect them to only work on the files you specify.\\n\\nTo prepare, \"\n- \"please add to your `pants.ini` under both the `fmt` and the `lint` \"\n- \"sections the option `transitive: False`. If you want to keep the default, use \"\n- \"`True`, although we recommend setting to `False` as the `--transitive` option \"\n- \"will be removed in a future Pants version.\"\n- )\n return self.get_options().transitive\n \n \n@@ -83,7 +69,24 @@\n \n class HasSkipAndTransitiveGoalOptionsMixin(GoalOptionsMixin, HasSkipAndTransitiveOptionsMixin):\n \"\"\"A mixin for tasks that have a --transitive and a --skip option registered at the goal level.\"\"\"\n- pass\n+\n+ @property\n+ def act_transitively(self):\n+ deprecated_conditional(\n+ lambda: self.get_options().is_default(\"transitive\"),\n+ removal_version=\"1.25.0.dev2\",\n+ entity_description=\"Pants defaulting to `--fmt-transitive` and `--lint-transitive`\",\n+ hint_message=\"Pants will soon default to `--no-fmt-transitive` and `--no-lint-transitive`. \"\n+ \"Currently, Pants defaults to `--fmt-transitive` and `--lint-transitive`, which \"\n+ \"means that tools like isort and Scalafmt will work on transitive dependencies \"\n+ \"as well. This behavior is unexpected. Normally when running tools like isort, \"\n+ \"you'd expect them to only work on the files you specify.\\n\\nTo prepare, \"\n+ \"please add to your `pants.ini` under both the `fmt` and the `lint` \"\n+ \"sections the option `transitive: False`. If you want to keep the default, use \"\n+ \"`True`, although we recommend setting to `False` as the `--transitive` option \"\n+ \"will be removed in a future Pants version.\"\n+ )\n+ return self.get_options().transitive\n \n \n class SkipAndTransitiveOptionsRegistrar(SkipOptionRegistrar, TransitiveOptionRegistrar):\n", "issue": "Deprecate using default `--fmt-transitive` and `--lint-transitive` in preparation for switching default to `--no-{fmt,lint}-transitive`\n### Problem\r\n\r\nWill close https://github.com/pantsbuild/pants/issues/8345.\r\n\r\nThese options cause tools like isort and Scalafmt to work on the transitive dependencies of the targets you specify, rather than only the targets specified. 
This is surprising and not how the tools work when called directly\u2014you'd expect isort to only change the files you pass to it, for example.\r\n\r\nWe decided when adding this option to the V2 rules (https://github.com/pantsbuild/pants/pull/8660) that instead we should deprecate this misfeature.\r\n\r\nHowever, we cannot simply deprecate the option in one fell-swoop because then people who are trying to prepare for the default behavior changing to `--no-transitive` will be met with a deprecation warning that the option will be removed. Leaving off the option so that there's no deprecation warning means that they will have a breaking behavior change in 1.25.0.dev2 when we no longer act transitively.\r\n\r\n### Solution\r\n\r\nFor this deprecation cycle, only warn that the default will change if they are currently relying on the default.\r\n\r\nIn 1.25.0.dev2, after the default changes, _then_ we can safely deprecate the option outright.\r\n\r\n### Result\r\n\r\nUsers who specified the option get this warning message:\r\n\r\n\r\n> [WARN] /Users/eric/DocsLocal/code/projects/pants/src/python/pants/task/task.py:265: DeprecationWarning: DEPRECATED: Pants defaulting to --fmt-transitive and --lint-transitive will be removed in version 1.25.0.dev2.\r\n Pants will soon default to --no-fmt-transitive and --no-lint-transitive. Currently, Pants defaults to `--fmt-transitive` and `--lint-transitive`, which means that tools like isort and Scalafmt will work on transitive dependencies as well. This behavior is unexpected. Normally when running tools like isort, you'd expect them to only work on the files you specify.\r\n> \r\n> To prepare, please add to your `pants.ini` under both the `fmt` and the `lint` sections the option `transitive: False`. If you want to keep the default, use `True`, although the option will be removed in Pants 1.27.0.dev2\n", "before_files": [{"content": "# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom pants.base.deprecated import deprecated_conditional\nfrom pants.task.goal_options_mixin import GoalOptionsMixin, GoalOptionsRegistrar\n\n\nclass HasTransitiveOptionMixin:\n \"\"\"A mixin for tasks that have a --transitive option.\n\n Some tasks must always act on the entire dependency closure. E.g., when compiling, one must\n compile all of a target's dependencies before compiling that target.\n\n Other tasks must always act only on the target roots (the targets explicitly specified by the\n user on the command line). E.g., when finding paths between two user-specified targets.\n\n Still other tasks may optionally act on either the target roots or the entire closure,\n as the user prefers in each case. E.g., when invoking a linter. This mixin supports such tasks.\n\n Note that this mixin doesn't actually register the --transitive option. It assumes that this\n option was registered on the task (either directly or recursively from its goal).\n \"\"\"\n\n @property\n def act_transitively(self):\n deprecated_conditional(\n lambda: self.get_options().is_default(\"transitive\"),\n removal_version=\"1.25.0.dev2\",\n entity_description=\"Pants defaulting to `--fmt-transitive` and `--lint-transitive`\",\n hint_message=\"Pants will soon default to `--no-fmt-transitive` and `--no-lint-transitive`. \"\n \"Currently, Pants defaults to `--fmt-transitive` and `--lint-transitive`, which \"\n \"means that tools like isort and Scalafmt will work on transitive dependencies \"\n \"as well. 
This behavior is unexpected. Normally when running tools like isort, \"\n \"you'd expect them to only work on the files you specify.\\n\\nTo prepare, \"\n \"please add to your `pants.ini` under both the `fmt` and the `lint` \"\n \"sections the option `transitive: False`. If you want to keep the default, use \"\n \"`True`, although we recommend setting to `False` as the `--transitive` option \"\n \"will be removed in a future Pants version.\"\n )\n return self.get_options().transitive\n\n\nclass TransitiveOptionRegistrar:\n \"\"\"Registrar of --transitive.\"\"\"\n\n @classmethod\n def register_options(cls, register):\n super().register_options(register)\n register('--transitive', type=bool, default=True, fingerprint=True, recursive=True,\n help=\"If false, act only on the targets directly specified on the command line. \"\n \"If true, act on the transitive dependency closure of those targets.\")\n\n\nclass HasSkipOptionMixin:\n \"\"\"A mixin for tasks that have a --skip option.\n\n Some tasks may be skipped during certain usages. E.g., you may not want to apply linters\n while developing. This mixin supports such tasks.\n\n Note that this mixin doesn't actually register the --skip option. It assumes that this\n option was registered on the task (either directly or recursively from its goal).\n \"\"\"\n\n @property\n def skip_execution(self):\n return self.get_options().skip\n\n\nclass SkipOptionRegistrar:\n \"\"\"Registrar of --skip.\"\"\"\n\n @classmethod\n def register_options(cls, register):\n super().register_options(register)\n register('--skip', type=bool, default=False, fingerprint=True, recursive=True,\n help='Skip task.')\n\n\nclass HasSkipAndTransitiveOptionsMixin(HasSkipOptionMixin, HasTransitiveOptionMixin):\n \"\"\"A mixin for tasks that have a --transitive and a --skip option.\"\"\"\n pass\n\n\nclass HasSkipAndTransitiveGoalOptionsMixin(GoalOptionsMixin, HasSkipAndTransitiveOptionsMixin):\n \"\"\"A mixin for tasks that have a --transitive and a --skip option registered at the goal level.\"\"\"\n pass\n\n\nclass SkipAndTransitiveOptionsRegistrar(SkipOptionRegistrar, TransitiveOptionRegistrar):\n \"\"\"Registrar of --skip and --transitive.\"\"\"\n pass\n\n\nclass SkipAndTransitiveGoalOptionsRegistrar(SkipAndTransitiveOptionsRegistrar,\n GoalOptionsRegistrar):\n \"\"\"Registrar of --skip and --transitive at the goal level.\"\"\"\n pass\n", "path": "src/python/pants/task/target_restriction_mixins.py"}], "after_files": [{"content": "# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom pants.base.deprecated import deprecated_conditional\nfrom pants.task.goal_options_mixin import GoalOptionsMixin, GoalOptionsRegistrar\n\n\nclass HasTransitiveOptionMixin:\n \"\"\"A mixin for tasks that have a --transitive option.\n\n Some tasks must always act on the entire dependency closure. E.g., when compiling, one must\n compile all of a target's dependencies before compiling that target.\n\n Other tasks must always act only on the target roots (the targets explicitly specified by the\n user on the command line). E.g., when finding paths between two user-specified targets.\n\n Still other tasks may optionally act on either the target roots or the entire closure,\n as the user prefers in each case. E.g., when invoking a linter. This mixin supports such tasks.\n\n Note that this mixin doesn't actually register the --transitive option. 
It assumes that this\n option was registered on the task (either directly or recursively from its goal).\n \"\"\"\n\n @property\n def act_transitively(self):\n return self.get_options().transitive\n\n\nclass TransitiveOptionRegistrar:\n \"\"\"Registrar of --transitive.\"\"\"\n\n @classmethod\n def register_options(cls, register):\n super().register_options(register)\n register('--transitive', type=bool, default=True, fingerprint=True, recursive=True,\n help=\"If false, act only on the targets directly specified on the command line. \"\n \"If true, act on the transitive dependency closure of those targets.\")\n\n\nclass HasSkipOptionMixin:\n \"\"\"A mixin for tasks that have a --skip option.\n\n Some tasks may be skipped during certain usages. E.g., you may not want to apply linters\n while developing. This mixin supports such tasks.\n\n Note that this mixin doesn't actually register the --skip option. It assumes that this\n option was registered on the task (either directly or recursively from its goal).\n \"\"\"\n\n @property\n def skip_execution(self):\n return self.get_options().skip\n\n\nclass SkipOptionRegistrar:\n \"\"\"Registrar of --skip.\"\"\"\n\n @classmethod\n def register_options(cls, register):\n super().register_options(register)\n register('--skip', type=bool, default=False, fingerprint=True, recursive=True,\n help='Skip task.')\n\n\nclass HasSkipAndTransitiveOptionsMixin(HasSkipOptionMixin, HasTransitiveOptionMixin):\n \"\"\"A mixin for tasks that have a --transitive and a --skip option.\"\"\"\n pass\n\n\nclass HasSkipAndTransitiveGoalOptionsMixin(GoalOptionsMixin, HasSkipAndTransitiveOptionsMixin):\n \"\"\"A mixin for tasks that have a --transitive and a --skip option registered at the goal level.\"\"\"\n\n @property\n def act_transitively(self):\n deprecated_conditional(\n lambda: self.get_options().is_default(\"transitive\"),\n removal_version=\"1.25.0.dev2\",\n entity_description=\"Pants defaulting to `--fmt-transitive` and `--lint-transitive`\",\n hint_message=\"Pants will soon default to `--no-fmt-transitive` and `--no-lint-transitive`. \"\n \"Currently, Pants defaults to `--fmt-transitive` and `--lint-transitive`, which \"\n \"means that tools like isort and Scalafmt will work on transitive dependencies \"\n \"as well. This behavior is unexpected. Normally when running tools like isort, \"\n \"you'd expect them to only work on the files you specify.\\n\\nTo prepare, \"\n \"please add to your `pants.ini` under both the `fmt` and the `lint` \"\n \"sections the option `transitive: False`. If you want to keep the default, use \"\n \"`True`, although we recommend setting to `False` as the `--transitive` option \"\n \"will be removed in a future Pants version.\"\n )\n return self.get_options().transitive\n\n\nclass SkipAndTransitiveOptionsRegistrar(SkipOptionRegistrar, TransitiveOptionRegistrar):\n \"\"\"Registrar of --skip and --transitive.\"\"\"\n pass\n\n\nclass SkipAndTransitiveGoalOptionsRegistrar(SkipAndTransitiveOptionsRegistrar,\n GoalOptionsRegistrar):\n \"\"\"Registrar of --skip and --transitive at the goal level.\"\"\"\n pass\n", "path": "src/python/pants/task/target_restriction_mixins.py"}]}
1,904
707
gh_patches_debug_66276
rasdani/github-patches
git_diff
python-poetry__poetry-979
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- --no-root behavior is inverted on latest develop [This](https://github.com/sdispater/poetry/commit/37ec1447b3508ee0bbdb41f8e5773ed5bfae0654#diff-427299ba040b8502b4d29846e595c2d0R59) should probably be `if self.option("no-root")`, to _not_ install the root package when `--no-root` is provided. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `poetry/console/commands/install.py` Content: ``` 1 import os 2 3 from .env_command import EnvCommand 4 5 6 class InstallCommand(EnvCommand): 7 """ 8 Installs the project dependencies. 9 10 install 11 { --no-dev : Do not install dev dependencies. } 12 { --no-root : Do not install the root package (your project). } 13 { --dry-run : Outputs the operations but will not execute anything 14 (implicitly enables --verbose). } 15 { --E|extras=* : Extra sets of dependencies to install. } 16 { --develop=* : Install given packages in development mode. } 17 """ 18 19 help = """The <info>install</info> command reads the <comment>poetry.lock</> file from 20 the current directory, processes it, and downloads and installs all the 21 libraries and dependencies outlined in that file. If the file does not 22 exist it will look for <comment>pyproject.toml</> and do the same. 23 24 <info>poetry install</info> 25 """ 26 27 _loggers = ["poetry.repositories.pypi_repository"] 28 29 def handle(self): 30 from clikit.io import NullIO 31 from poetry.installation import Installer 32 from poetry.masonry.builders import SdistBuilder 33 from poetry.masonry.utils.module import ModuleOrPackageNotFound 34 from poetry.utils._compat import decode 35 from poetry.utils.env import NullEnv 36 37 installer = Installer( 38 self.io, self.env, self.poetry.package, self.poetry.locker, self.poetry.pool 39 ) 40 41 extras = [] 42 for extra in self.option("extras"): 43 if " " in extra: 44 extras += [e.strip() for e in extra.split(" ")] 45 else: 46 extras.append(extra) 47 48 installer.extras(extras) 49 installer.dev_mode(not self.option("no-dev")) 50 installer.develop(self.option("develop")) 51 installer.dry_run(self.option("dry-run")) 52 installer.verbose(self.option("verbose")) 53 54 return_code = installer.run() 55 56 if return_code != 0: 57 return return_code 58 59 if not self.option("no-root"): 60 return 0 61 62 try: 63 builder = SdistBuilder(self.poetry, NullEnv(), NullIO()) 64 except ModuleOrPackageNotFound: 65 # This is likely due to the fact that the project is an application 66 # not following the structure expected by Poetry 67 # If this is a true error it will be picked up later by build anyway. 68 return 0 69 70 self.line( 71 " - Installing <info>{}</info> (<comment>{}</comment>)".format( 72 self.poetry.package.pretty_name, self.poetry.package.pretty_version 73 ) 74 ) 75 76 if self.option("dry-run"): 77 return 0 78 79 setup = self.poetry.file.parent / "setup.py" 80 has_setup = setup.exists() 81 82 if has_setup: 83 self.line("<warning>A setup.py file already exists. 
Using it.</warning>") 84 else: 85 with setup.open("w", encoding="utf-8") as f: 86 f.write(decode(builder.build_setup())) 87 88 try: 89 self.env.run("pip", "install", "-e", str(setup.parent), "--no-deps") 90 finally: 91 if not has_setup: 92 os.remove(str(setup)) 93 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/poetry/console/commands/install.py b/poetry/console/commands/install.py --- a/poetry/console/commands/install.py +++ b/poetry/console/commands/install.py @@ -56,7 +56,7 @@ if return_code != 0: return return_code - if not self.option("no-root"): + if self.option("no-root"): return 0 try:
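The one-line fix above flips an inverted guard: the buggy `if not self.option("no-root"): return 0` exited early (skipping the root-package install) exactly when the user had *not* passed `--no-root`. A tiny reduction of that control flow; the function name and return strings are illustrative, not Poetry's real API:

```python
def install(no_root: bool) -> str:
    # Buggy version read `if not no_root: return "deps only"`, so the early
    # return fired in the default case and the root package never installed.
    if no_root:  # corrected guard, mirroring the golden diff
        return "deps only"
    return "deps + root"


assert install(no_root=False) == "deps + root"  # default: root is installed
assert install(no_root=True) == "deps only"     # --no-root skips the root
```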
{"golden_diff": "diff --git a/poetry/console/commands/install.py b/poetry/console/commands/install.py\n--- a/poetry/console/commands/install.py\n+++ b/poetry/console/commands/install.py\n@@ -56,7 +56,7 @@\n if return_code != 0:\n return return_code\n \n- if not self.option(\"no-root\"):\n+ if self.option(\"no-root\"):\n return 0\n \n try:\n", "issue": "--no-root behavior is inverted on latest develop\n[This](https://github.com/sdispater/poetry/commit/37ec1447b3508ee0bbdb41f8e5773ed5bfae0654#diff-427299ba040b8502b4d29846e595c2d0R59) should probably be `if self.option(\"no-root\")`, to _not_ install the root package when `--no-root` is provided.\n", "before_files": [{"content": "import os\n\nfrom .env_command import EnvCommand\n\n\nclass InstallCommand(EnvCommand):\n \"\"\"\n Installs the project dependencies.\n\n install\n { --no-dev : Do not install dev dependencies. }\n { --no-root : Do not install the root package (your project). }\n { --dry-run : Outputs the operations but will not execute anything\n (implicitly enables --verbose). }\n { --E|extras=* : Extra sets of dependencies to install. }\n { --develop=* : Install given packages in development mode. }\n \"\"\"\n\n help = \"\"\"The <info>install</info> command reads the <comment>poetry.lock</> file from\nthe current directory, processes it, and downloads and installs all the\nlibraries and dependencies outlined in that file. If the file does not\nexist it will look for <comment>pyproject.toml</> and do the same.\n\n<info>poetry install</info>\n\"\"\"\n\n _loggers = [\"poetry.repositories.pypi_repository\"]\n\n def handle(self):\n from clikit.io import NullIO\n from poetry.installation import Installer\n from poetry.masonry.builders import SdistBuilder\n from poetry.masonry.utils.module import ModuleOrPackageNotFound\n from poetry.utils._compat import decode\n from poetry.utils.env import NullEnv\n\n installer = Installer(\n self.io, self.env, self.poetry.package, self.poetry.locker, self.poetry.pool\n )\n\n extras = []\n for extra in self.option(\"extras\"):\n if \" \" in extra:\n extras += [e.strip() for e in extra.split(\" \")]\n else:\n extras.append(extra)\n\n installer.extras(extras)\n installer.dev_mode(not self.option(\"no-dev\"))\n installer.develop(self.option(\"develop\"))\n installer.dry_run(self.option(\"dry-run\"))\n installer.verbose(self.option(\"verbose\"))\n\n return_code = installer.run()\n\n if return_code != 0:\n return return_code\n\n if not self.option(\"no-root\"):\n return 0\n\n try:\n builder = SdistBuilder(self.poetry, NullEnv(), NullIO())\n except ModuleOrPackageNotFound:\n # This is likely due to the fact that the project is an application\n # not following the structure expected by Poetry\n # If this is a true error it will be picked up later by build anyway.\n return 0\n\n self.line(\n \" - Installing <info>{}</info> (<comment>{}</comment>)\".format(\n self.poetry.package.pretty_name, self.poetry.package.pretty_version\n )\n )\n\n if self.option(\"dry-run\"):\n return 0\n\n setup = self.poetry.file.parent / \"setup.py\"\n has_setup = setup.exists()\n\n if has_setup:\n self.line(\"<warning>A setup.py file already exists. 
Using it.</warning>\")\n else:\n with setup.open(\"w\", encoding=\"utf-8\") as f:\n f.write(decode(builder.build_setup()))\n\n try:\n self.env.run(\"pip\", \"install\", \"-e\", str(setup.parent), \"--no-deps\")\n finally:\n if not has_setup:\n os.remove(str(setup))\n", "path": "poetry/console/commands/install.py"}], "after_files": [{"content": "import os\n\nfrom .env_command import EnvCommand\n\n\nclass InstallCommand(EnvCommand):\n \"\"\"\n Installs the project dependencies.\n\n install\n { --no-dev : Do not install dev dependencies. }\n { --no-root : Do not install the root package (your project). }\n { --dry-run : Outputs the operations but will not execute anything\n (implicitly enables --verbose). }\n { --E|extras=* : Extra sets of dependencies to install. }\n { --develop=* : Install given packages in development mode. }\n \"\"\"\n\n help = \"\"\"The <info>install</info> command reads the <comment>poetry.lock</> file from\nthe current directory, processes it, and downloads and installs all the\nlibraries and dependencies outlined in that file. If the file does not\nexist it will look for <comment>pyproject.toml</> and do the same.\n\n<info>poetry install</info>\n\"\"\"\n\n _loggers = [\"poetry.repositories.pypi_repository\"]\n\n def handle(self):\n from clikit.io import NullIO\n from poetry.installation import Installer\n from poetry.masonry.builders import SdistBuilder\n from poetry.masonry.utils.module import ModuleOrPackageNotFound\n from poetry.utils._compat import decode\n from poetry.utils.env import NullEnv\n\n installer = Installer(\n self.io, self.env, self.poetry.package, self.poetry.locker, self.poetry.pool\n )\n\n extras = []\n for extra in self.option(\"extras\"):\n if \" \" in extra:\n extras += [e.strip() for e in extra.split(\" \")]\n else:\n extras.append(extra)\n\n installer.extras(extras)\n installer.dev_mode(not self.option(\"no-dev\"))\n installer.develop(self.option(\"develop\"))\n installer.dry_run(self.option(\"dry-run\"))\n installer.verbose(self.option(\"verbose\"))\n\n return_code = installer.run()\n\n if return_code != 0:\n return return_code\n\n if self.option(\"no-root\"):\n return 0\n\n try:\n builder = SdistBuilder(self.poetry, NullEnv(), NullIO())\n except ModuleOrPackageNotFound:\n # This is likely due to the fact that the project is an application\n # not following the structure expected by Poetry\n # If this is a true error it will be picked up later by build anyway.\n return 0\n\n self.line(\n \" - Installing <info>{}</info> (<comment>{}</comment>)\".format(\n self.poetry.package.pretty_name, self.poetry.package.pretty_version\n )\n )\n\n if self.option(\"dry-run\"):\n return 0\n\n setup = self.poetry.file.parent / \"setup.py\"\n has_setup = setup.exists()\n\n if has_setup:\n self.line(\"<warning>A setup.py file already exists. Using it.</warning>\")\n else:\n with setup.open(\"w\", encoding=\"utf-8\") as f:\n f.write(decode(builder.build_setup()))\n\n try:\n self.env.run(\"pip\", \"install\", \"-e\", str(setup.parent), \"--no-deps\")\n finally:\n if not has_setup:\n os.remove(str(setup))\n", "path": "poetry/console/commands/install.py"}]}
1,257
96
gh_patches_debug_25855
rasdani/github-patches
git_diff
liqd__a4-meinberlin-4956
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- TemplateDoesNotExist: account/email/account_already_exists.de.email, account/email/account_already_exists.en.email Sentry Issue: [MEINBERLIN-DEV-81](https://sentry.liqd.net/organizations/liqd/issues/2951/?referrer=github_integration) ``` TemplateDoesNotExist: account/email/account_already_exists.de.email, account/email/account_already_exists.en.email (16 additional frame(s) were not displayed) ... File "meinberlin/apps/users/adapters.py", line 46, in send_mail return UserAccountEmail.send(email, template_name=template_prefix, **context) File "adhocracy4/emails/mixins.py", line 38, in send return cls().dispatch(object, *args, **kwargs) File "adhocracy4/emails/base.py", line 127, in dispatch (subject, text, html) = self.render(template, context) File "adhocracy4/emails/base.py", line 98, in render template = select_template( File "django/template/loader.py", line 47, in select_template raise TemplateDoesNotExist(', '.join(template_name_list), chain=chain) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `meinberlin/apps/users/forms.py` Content: ``` 1 import collections 2 3 from allauth.account.forms import SignupForm 4 from allauth.socialaccount.forms import SignupForm as SocialSignupForm 5 from django import forms 6 from django.contrib.auth import forms as auth_forms 7 from django.core.exceptions import ValidationError 8 from django.utils.translation import gettext_lazy as _ 9 from django.utils.translation import ngettext 10 11 from meinberlin.apps.captcha.fields import CaptcheckCaptchaField 12 from meinberlin.apps.organisations.models import Organisation 13 from meinberlin.apps.users.models import User 14 15 16 class UserAdminForm(auth_forms.UserChangeForm): 17 def clean(self): 18 groups = self.cleaned_data.get("groups") 19 group_list = groups.values_list("id", flat=True) 20 group_organisations = Organisation.objects.filter( 21 groups__in=group_list 22 ).values_list("name", flat=True) 23 duplicates = [ 24 item 25 for item, count in collections.Counter(group_organisations).items() 26 if count > 1 27 ] 28 if duplicates: 29 count = len(duplicates) 30 message = ngettext( 31 "User is member in more than one group " 32 "in this organisation: %(duplicates)s.", 33 "User is member in more than one group " 34 "in these organisations: %(duplicates)s.", 35 count, 36 ) % {"duplicates": ", ".join(duplicates)} 37 raise ValidationError(message) 38 return self.cleaned_data 39 40 def clean_username(self): 41 42 username = self.cleaned_data["username"] 43 try: 44 user = User.objects.get(username__iexact=username) 45 if user != self.instance: 46 raise forms.ValidationError( 47 User._meta.get_field("username").error_messages["unique"] 48 ) 49 except User.DoesNotExist: 50 pass 51 52 try: 53 user = User.objects.get(email__iexact=username) 54 if user != self.instance: 55 raise forms.ValidationError( 56 User._meta.get_field("username").error_messages["used_as_email"] 57 ) 58 except User.DoesNotExist: 59 pass 60 61 return username 62 63 64 class AddUserAdminForm(auth_forms.UserCreationForm): 65 def clean_username(self): 66 67 username = self.cleaned_data["username"] 68 user = User.objects.filter(username__iexact=username) 69 if user.exists(): 70 raise forms.ValidationError( 71 User._meta.get_field("username").error_messages["unique"] 72 ) 73 else: 74 user = User.objects.filter(email__iexact=username) 75 if 
user.exists(): 76 raise forms.ValidationError( 77 User._meta.get_field("username").error_messages["used_as_email"] 78 ) 79 return username 80 81 82 class TermsSignupForm(SignupForm): 83 terms_of_use = forms.BooleanField(label=_("Terms of use")) 84 get_newsletters = forms.BooleanField( 85 label=_("Newsletter"), 86 help_text=_( 87 "Yes, I would like to receive e-mail newsletters about " 88 "the projects I am following." 89 ), 90 required=False, 91 ) 92 get_notifications = forms.BooleanField( 93 label=_("Notifications"), 94 help_text=_( 95 "Yes, I would like to be notified by e-mail about the " 96 "start and end of participation opportunities. This " 97 "applies to all projects I follow. I also receive an " 98 "e-mail when someone comments on one of my " 99 "contributions." 100 ), 101 required=False, 102 initial=True, 103 ) 104 captcha = CaptcheckCaptchaField(label=_("I am not a robot")) 105 106 def __init__(self, *args, **kwargs): 107 super().__init__(*args, **kwargs) 108 self.fields["username"].help_text = _( 109 "Your username will appear publicly next to your posts." 110 ) 111 self.fields["email"].widget.attrs["autofocus"] = True 112 113 def save(self, request): 114 user = super(TermsSignupForm, self).save(request) 115 user.get_newsletters = self.cleaned_data["get_newsletters"] 116 user.get_notifications = self.cleaned_data["get_notifications"] 117 user.save() 118 return user 119 120 121 class SocialTermsSignupForm(SocialSignupForm): 122 terms_of_use = forms.BooleanField(label=_("Terms of use")) 123 get_newsletters = forms.BooleanField( 124 label=_("Newsletter"), 125 help_text=_( 126 "Yes, I would like to receive e-mail newsletters about " 127 "the projects I am following." 128 ), 129 required=False, 130 ) 131 get_notifications = forms.BooleanField( 132 label=_("Notifications"), 133 help_text=_( 134 "Yes, I would like to be notified by e-mail about the " 135 "start and end of participation opportunities. This " 136 "applies to all projects I follow. I also receive an " 137 "e-mail when someone comments on one of my " 138 "contributions." 139 ), 140 required=False, 141 initial=True, 142 ) 143 email = forms.EmailField(widget=forms.HiddenInput()) 144 145 def __init__(self, *args, **kwargs): 146 super().__init__(*args, **kwargs) 147 self.fields["username"].help_text = _( 148 "Your username will appear publicly next to your posts." 149 ) 150 151 def save(self, request): 152 user = super(SocialTermsSignupForm, self).save(request) 153 user.get_newsletters = self.cleaned_data["get_newsletters"] 154 user.get_notifications = self.cleaned_data["get_notifications"] 155 user.save() 156 return user 157 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/meinberlin/apps/users/forms.py b/meinberlin/apps/users/forms.py --- a/meinberlin/apps/users/forms.py +++ b/meinberlin/apps/users/forms.py @@ -38,7 +38,6 @@ return self.cleaned_data def clean_username(self): - username = self.cleaned_data["username"] try: user = User.objects.get(username__iexact=username) @@ -63,7 +62,6 @@ class AddUserAdminForm(auth_forms.UserCreationForm): def clean_username(self): - username = self.cleaned_data["username"] user = User.objects.filter(username__iexact=username) if user.exists(): @@ -112,10 +110,11 @@ def save(self, request): user = super(TermsSignupForm, self).save(request) - user.get_newsletters = self.cleaned_data["get_newsletters"] - user.get_notifications = self.cleaned_data["get_notifications"] - user.save() - return user + if user: + user.get_newsletters = self.cleaned_data["get_newsletters"] + user.get_notifications = self.cleaned_data["get_notifications"] + user.save() + return user class SocialTermsSignupForm(SocialSignupForm):
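Besides trimming blank lines, the patch above wraps the attribute writes in `TermsSignupForm.save()` with `if user:`. The likely rationale (inferred from the traceback, not stated in the issue) is that allauth's `SignupForm.save()` can return `None` when the address already belongs to an account, in which case allauth tries to send the `account_already_exists` e-mail whose templates the project lacks; writing attributes on that `None` would fail regardless. A self-contained sketch of the guard, with a fake base form standing in for allauth:

```python
class FakeSignupForm:
    """Stand-in for allauth's SignupForm: save() may return None when the
    e-mail is already registered (allauth notifies the existing user)."""

    existing_emails = {"taken@example.org"}

    def __init__(self, email):
        self.cleaned_data = {"email": email, "get_newsletters": True}

    def save(self, request):
        if self.cleaned_data["email"] in self.existing_emails:
            return None  # the "account already exists" e-mail path
        return {"email": self.cleaned_data["email"]}


class TermsSignupForm(FakeSignupForm):
    def save(self, request):
        user = super().save(request)
        if user:  # the fix: only set flags when a user was actually created
            user["get_newsletters"] = self.cleaned_data["get_newsletters"]
        return user


assert TermsSignupForm("new@example.org").save(None)["get_newsletters"] is True
assert TermsSignupForm("taken@example.org").save(None) is None
```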
{"golden_diff": "diff --git a/meinberlin/apps/users/forms.py b/meinberlin/apps/users/forms.py\n--- a/meinberlin/apps/users/forms.py\n+++ b/meinberlin/apps/users/forms.py\n@@ -38,7 +38,6 @@\n return self.cleaned_data\n \n def clean_username(self):\n-\n username = self.cleaned_data[\"username\"]\n try:\n user = User.objects.get(username__iexact=username)\n@@ -63,7 +62,6 @@\n \n class AddUserAdminForm(auth_forms.UserCreationForm):\n def clean_username(self):\n-\n username = self.cleaned_data[\"username\"]\n user = User.objects.filter(username__iexact=username)\n if user.exists():\n@@ -112,10 +110,11 @@\n \n def save(self, request):\n user = super(TermsSignupForm, self).save(request)\n- user.get_newsletters = self.cleaned_data[\"get_newsletters\"]\n- user.get_notifications = self.cleaned_data[\"get_notifications\"]\n- user.save()\n- return user\n+ if user:\n+ user.get_newsletters = self.cleaned_data[\"get_newsletters\"]\n+ user.get_notifications = self.cleaned_data[\"get_notifications\"]\n+ user.save()\n+ return user\n \n \n class SocialTermsSignupForm(SocialSignupForm):\n", "issue": "TemplateDoesNotExist: account/email/account_already_exists.de.email, account/email/account_already_exists.en.email\nSentry Issue: [MEINBERLIN-DEV-81](https://sentry.liqd.net/organizations/liqd/issues/2951/?referrer=github_integration)\n\n```\nTemplateDoesNotExist: account/email/account_already_exists.de.email, account/email/account_already_exists.en.email\n(16 additional frame(s) were not displayed)\n...\n File \"meinberlin/apps/users/adapters.py\", line 46, in send_mail\n return UserAccountEmail.send(email, template_name=template_prefix, **context)\n File \"adhocracy4/emails/mixins.py\", line 38, in send\n return cls().dispatch(object, *args, **kwargs)\n File \"adhocracy4/emails/base.py\", line 127, in dispatch\n (subject, text, html) = self.render(template, context)\n File \"adhocracy4/emails/base.py\", line 98, in render\n template = select_template(\n File \"django/template/loader.py\", line 47, in select_template\n raise TemplateDoesNotExist(', '.join(template_name_list), chain=chain)\n```\n", "before_files": [{"content": "import collections\n\nfrom allauth.account.forms import SignupForm\nfrom allauth.socialaccount.forms import SignupForm as SocialSignupForm\nfrom django import forms\nfrom django.contrib.auth import forms as auth_forms\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils.translation import ngettext\n\nfrom meinberlin.apps.captcha.fields import CaptcheckCaptchaField\nfrom meinberlin.apps.organisations.models import Organisation\nfrom meinberlin.apps.users.models import User\n\n\nclass UserAdminForm(auth_forms.UserChangeForm):\n def clean(self):\n groups = self.cleaned_data.get(\"groups\")\n group_list = groups.values_list(\"id\", flat=True)\n group_organisations = Organisation.objects.filter(\n groups__in=group_list\n ).values_list(\"name\", flat=True)\n duplicates = [\n item\n for item, count in collections.Counter(group_organisations).items()\n if count > 1\n ]\n if duplicates:\n count = len(duplicates)\n message = ngettext(\n \"User is member in more than one group \"\n \"in this organisation: %(duplicates)s.\",\n \"User is member in more than one group \"\n \"in these organisations: %(duplicates)s.\",\n count,\n ) % {\"duplicates\": \", \".join(duplicates)}\n raise ValidationError(message)\n return self.cleaned_data\n\n def clean_username(self):\n\n username = self.cleaned_data[\"username\"]\n try:\n user = 
User.objects.get(username__iexact=username)\n if user != self.instance:\n raise forms.ValidationError(\n User._meta.get_field(\"username\").error_messages[\"unique\"]\n )\n except User.DoesNotExist:\n pass\n\n try:\n user = User.objects.get(email__iexact=username)\n if user != self.instance:\n raise forms.ValidationError(\n User._meta.get_field(\"username\").error_messages[\"used_as_email\"]\n )\n except User.DoesNotExist:\n pass\n\n return username\n\n\nclass AddUserAdminForm(auth_forms.UserCreationForm):\n def clean_username(self):\n\n username = self.cleaned_data[\"username\"]\n user = User.objects.filter(username__iexact=username)\n if user.exists():\n raise forms.ValidationError(\n User._meta.get_field(\"username\").error_messages[\"unique\"]\n )\n else:\n user = User.objects.filter(email__iexact=username)\n if user.exists():\n raise forms.ValidationError(\n User._meta.get_field(\"username\").error_messages[\"used_as_email\"]\n )\n return username\n\n\nclass TermsSignupForm(SignupForm):\n terms_of_use = forms.BooleanField(label=_(\"Terms of use\"))\n get_newsletters = forms.BooleanField(\n label=_(\"Newsletter\"),\n help_text=_(\n \"Yes, I would like to receive e-mail newsletters about \"\n \"the projects I am following.\"\n ),\n required=False,\n )\n get_notifications = forms.BooleanField(\n label=_(\"Notifications\"),\n help_text=_(\n \"Yes, I would like to be notified by e-mail about the \"\n \"start and end of participation opportunities. This \"\n \"applies to all projects I follow. I also receive an \"\n \"e-mail when someone comments on one of my \"\n \"contributions.\"\n ),\n required=False,\n initial=True,\n )\n captcha = CaptcheckCaptchaField(label=_(\"I am not a robot\"))\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"username\"].help_text = _(\n \"Your username will appear publicly next to your posts.\"\n )\n self.fields[\"email\"].widget.attrs[\"autofocus\"] = True\n\n def save(self, request):\n user = super(TermsSignupForm, self).save(request)\n user.get_newsletters = self.cleaned_data[\"get_newsletters\"]\n user.get_notifications = self.cleaned_data[\"get_notifications\"]\n user.save()\n return user\n\n\nclass SocialTermsSignupForm(SocialSignupForm):\n terms_of_use = forms.BooleanField(label=_(\"Terms of use\"))\n get_newsletters = forms.BooleanField(\n label=_(\"Newsletter\"),\n help_text=_(\n \"Yes, I would like to receive e-mail newsletters about \"\n \"the projects I am following.\"\n ),\n required=False,\n )\n get_notifications = forms.BooleanField(\n label=_(\"Notifications\"),\n help_text=_(\n \"Yes, I would like to be notified by e-mail about the \"\n \"start and end of participation opportunities. This \"\n \"applies to all projects I follow. 
I also receive an \"\n \"e-mail when someone comments on one of my \"\n \"contributions.\"\n ),\n required=False,\n initial=True,\n )\n email = forms.EmailField(widget=forms.HiddenInput())\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"username\"].help_text = _(\n \"Your username will appear publicly next to your posts.\"\n )\n\n def save(self, request):\n user = super(SocialTermsSignupForm, self).save(request)\n user.get_newsletters = self.cleaned_data[\"get_newsletters\"]\n user.get_notifications = self.cleaned_data[\"get_notifications\"]\n user.save()\n return user\n", "path": "meinberlin/apps/users/forms.py"}], "after_files": [{"content": "import collections\n\nfrom allauth.account.forms import SignupForm\nfrom allauth.socialaccount.forms import SignupForm as SocialSignupForm\nfrom django import forms\nfrom django.contrib.auth import forms as auth_forms\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils.translation import ngettext\n\nfrom meinberlin.apps.captcha.fields import CaptcheckCaptchaField\nfrom meinberlin.apps.organisations.models import Organisation\nfrom meinberlin.apps.users.models import User\n\n\nclass UserAdminForm(auth_forms.UserChangeForm):\n def clean(self):\n groups = self.cleaned_data.get(\"groups\")\n group_list = groups.values_list(\"id\", flat=True)\n group_organisations = Organisation.objects.filter(\n groups__in=group_list\n ).values_list(\"name\", flat=True)\n duplicates = [\n item\n for item, count in collections.Counter(group_organisations).items()\n if count > 1\n ]\n if duplicates:\n count = len(duplicates)\n message = ngettext(\n \"User is member in more than one group \"\n \"in this organisation: %(duplicates)s.\",\n \"User is member in more than one group \"\n \"in these organisations: %(duplicates)s.\",\n count,\n ) % {\"duplicates\": \", \".join(duplicates)}\n raise ValidationError(message)\n return self.cleaned_data\n\n def clean_username(self):\n username = self.cleaned_data[\"username\"]\n try:\n user = User.objects.get(username__iexact=username)\n if user != self.instance:\n raise forms.ValidationError(\n User._meta.get_field(\"username\").error_messages[\"unique\"]\n )\n except User.DoesNotExist:\n pass\n\n try:\n user = User.objects.get(email__iexact=username)\n if user != self.instance:\n raise forms.ValidationError(\n User._meta.get_field(\"username\").error_messages[\"used_as_email\"]\n )\n except User.DoesNotExist:\n pass\n\n return username\n\n\nclass AddUserAdminForm(auth_forms.UserCreationForm):\n def clean_username(self):\n username = self.cleaned_data[\"username\"]\n user = User.objects.filter(username__iexact=username)\n if user.exists():\n raise forms.ValidationError(\n User._meta.get_field(\"username\").error_messages[\"unique\"]\n )\n else:\n user = User.objects.filter(email__iexact=username)\n if user.exists():\n raise forms.ValidationError(\n User._meta.get_field(\"username\").error_messages[\"used_as_email\"]\n )\n return username\n\n\nclass TermsSignupForm(SignupForm):\n terms_of_use = forms.BooleanField(label=_(\"Terms of use\"))\n get_newsletters = forms.BooleanField(\n label=_(\"Newsletter\"),\n help_text=_(\n \"Yes, I would like to receive e-mail newsletters about \"\n \"the projects I am following.\"\n ),\n required=False,\n )\n get_notifications = forms.BooleanField(\n label=_(\"Notifications\"),\n help_text=_(\n \"Yes, I would like to be notified by e-mail about the \"\n \"start and end of 
participation opportunities. This \"\n \"applies to all projects I follow. I also receive an \"\n \"e-mail when someone comments on one of my \"\n \"contributions.\"\n ),\n required=False,\n initial=True,\n )\n captcha = CaptcheckCaptchaField(label=_(\"I am not a robot\"))\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"username\"].help_text = _(\n \"Your username will appear publicly next to your posts.\"\n )\n self.fields[\"email\"].widget.attrs[\"autofocus\"] = True\n\n def save(self, request):\n user = super(TermsSignupForm, self).save(request)\n if user:\n user.get_newsletters = self.cleaned_data[\"get_newsletters\"]\n user.get_notifications = self.cleaned_data[\"get_notifications\"]\n user.save()\n return user\n\n\nclass SocialTermsSignupForm(SocialSignupForm):\n terms_of_use = forms.BooleanField(label=_(\"Terms of use\"))\n get_newsletters = forms.BooleanField(\n label=_(\"Newsletter\"),\n help_text=_(\n \"Yes, I would like to receive e-mail newsletters about \"\n \"the projects I am following.\"\n ),\n required=False,\n )\n get_notifications = forms.BooleanField(\n label=_(\"Notifications\"),\n help_text=_(\n \"Yes, I would like to be notified by e-mail about the \"\n \"start and end of participation opportunities. This \"\n \"applies to all projects I follow. I also receive an \"\n \"e-mail when someone comments on one of my \"\n \"contributions.\"\n ),\n required=False,\n initial=True,\n )\n email = forms.EmailField(widget=forms.HiddenInput())\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"username\"].help_text = _(\n \"Your username will appear publicly next to your posts.\"\n )\n\n def save(self, request):\n user = super(SocialTermsSignupForm, self).save(request)\n user.get_newsletters = self.cleaned_data[\"get_newsletters\"]\n user.get_notifications = self.cleaned_data[\"get_notifications\"]\n user.save()\n return user\n", "path": "meinberlin/apps/users/forms.py"}]}
1,992
281
gh_patches_debug_1944
rasdani/github-patches
git_diff
napari__napari-277
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- blending mode update error ## 🐛 Bug When viewing multiple layers with blending, I am experiencing a bug whereby changing the blending mode doesn't result in an immediate update. The update does occur when I change the opacity (at which point is happens immediately). ![bug](https://user-images.githubusercontent.com/3387500/55253093-aa80cc00-5211-11e9-828c-686595346b86.gif) ## To Reproduce Steps to reproduce the behavior: 1. Open the viewer with multiple layers (e.g. `examples/layers.py`) 2. Reduce the opacity of the top most layer to 0.5 3. Change the blending mode (e.g. `translucent` -> `opaque`) ## Expected behavior The update to what is rendered should happen immediately upon updating the blending mode. ## Environment - napari 0.18 - OS X 10.14.3 - Python version: 3.7.2 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `napari/layers/_base_layer/_visual_wrapper.py` Content: ``` 1 # TODO: create & use our own transform class 2 from vispy.visuals.transforms import STTransform 3 from vispy.gloo import get_state_presets 4 from ...util.event import EmitterGroup, Event 5 6 7 class VisualWrapper: 8 """Wrapper around ``vispy.scene.VisualNode`` objects. 9 Meant to be subclassed. 10 11 "Hidden" properties: 12 * ``_master_transform`` 13 * ``_order`` 14 * ``_parent`` 15 16 Parameters 17 ---------- 18 central_node : vispy.scene.VisualNode 19 Central node/control point with which to interact with the visual. 20 Stored as ``_node``. 21 22 Attributes 23 ---------- 24 opacity 25 visible 26 scale 27 blending 28 translate 29 z_index 30 31 Notes 32 ----- 33 It is recommended to use the backported ``vispy`` nodes 34 at ``_vispy.scene.visuals`` for various bug fixes. 35 """ 36 def __init__(self, central_node): 37 self._node = central_node 38 self._blending = 'translucent' 39 self.events = EmitterGroup(source=self, 40 auto_connect=True, 41 blending=Event, 42 opacity=Event, 43 visible=Event) 44 45 _blending_modes = set(get_state_presets().keys()) 46 47 @property 48 def _master_transform(self): 49 """vispy.visuals.transforms.STTransform: 50 Central node's firstmost transform. 51 """ 52 # whenever a new parent is set, the transform is reset 53 # to a NullTransform so we reset it here 54 if not isinstance(self._node.transform, STTransform): 55 self._node.transform = STTransform() 56 57 return self._node.transform 58 59 @property 60 def _order(self): 61 """int: Order in which the visual is drawn in the scenegraph. 62 Lower values are closer to the viewer. 63 """ 64 return self._node.order 65 66 @_order.setter 67 def _order(self, order): 68 # workaround for opacity (see: #22) 69 order = -order 70 self.z_index = order 71 # end workaround 72 self._node.order = order 73 74 @property 75 def _parent(self): 76 """vispy.scene.Node: Parent node. 77 """ 78 return self._node.parent 79 80 @_parent.setter 81 def _parent(self, parent): 82 self._node.parent = parent 83 84 @property 85 def opacity(self): 86 """float: Opacity value between 0.0 and 1.0. 87 """ 88 return self._node.opacity 89 90 @opacity.setter 91 def opacity(self, opacity): 92 if not 0.0 <= opacity <= 1.0: 93 raise ValueError('opacity must be between 0.0 and 1.0; ' 94 f'got {opacity}') 95 96 self._node.opacity = opacity 97 self.events.opacity() 98 99 @property 100 def blending(self): 101 """{'opaque', 'translucent', 'additive'}: Blending mode. 
102 Selects a preset blending mode in vispy that determines how 103 RGB and alpha values get mixed. 104 'opaque' 105 Allows for only the top layer to be visible and corresponds to 106 depth_test=True, cull_face=False, blend=False. 107 'translucent' 108 Allows for multiple layers to be blended with different opacity 109 and corresponds to depth_test=True, cull_face=False, 110 blend=True, blend_func=('src_alpha', 'one_minus_src_alpha'). 111 'additive' 112 Allows for multiple layers to be blended together with 113 different colors and opacity. Useful for creating overlays. It 114 corresponds to depth_test=False, cull_face=False, blend=True, 115 blend_func=('src_alpha', 'one'). 116 """ 117 return self._blending 118 119 @blending.setter 120 def blending(self, blending): 121 if blending not in self._blending_modes: 122 raise ValueError('expected one of ' 123 "{'opaque', 'translucent', 'additive'}; " 124 f'got {blending}') 125 self._node.set_gl_state(blending) 126 self._blending = blending 127 self.events.blending() 128 129 @property 130 def visible(self): 131 """bool: Whether the visual is currently being displayed. 132 """ 133 return self._node.visible 134 135 @visible.setter 136 def visible(self, visibility): 137 self._node.visible = visibility 138 self.events.visible() 139 140 @property 141 def scale(self): 142 """sequence of float: Scale factors. 143 """ 144 return self._master_transform.scale 145 146 @scale.setter 147 def scale(self, scale): 148 self._master_transform.scale = scale 149 150 @property 151 def translate(self): 152 """sequence of float: Translation values. 153 """ 154 return self._master_transform.translate 155 156 @translate.setter 157 def translate(self, translate): 158 self._master_transform.translate = translate 159 160 @property 161 def z_index(self): 162 return -self._master_transform.translate[2] 163 164 @z_index.setter 165 def z_index(self, index): 166 tr = self._master_transform 167 tl = tr.translate 168 tl[2] = -index 169 170 tr.translate = tl 171 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/napari/layers/_base_layer/_visual_wrapper.py b/napari/layers/_base_layer/_visual_wrapper.py --- a/napari/layers/_base_layer/_visual_wrapper.py +++ b/napari/layers/_base_layer/_visual_wrapper.py @@ -124,6 +124,7 @@ f'got {blending}') self._node.set_gl_state(blending) self._blending = blending + self._node.update() self.events.blending() @property
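The added `self._node.update()` asks vispy to schedule a canvas redraw: `set_gl_state()` alone mutates the blending preset without repainting, which is why the change in the report only became visible once something else (the opacity setter) forced a redraw. A minimal mock of that interaction; the class below mimics just the relevant slice of vispy's `VisualNode` and is not the real API:

```python
class FakeVisualNode:
    """Mimics the two vispy calls involved in the fix; purely illustrative."""

    def __init__(self):
        self.gl_state = "translucent"
        self.redraws = 0

    def set_gl_state(self, preset):
        self.gl_state = preset  # draw state changes, nothing is repainted

    def update(self):
        self.redraws += 1  # schedules a repaint of the canvas


node = FakeVisualNode()
node.set_gl_state("opaque")
assert node.redraws == 0  # the reported bug: no immediate repaint
node.update()             # the fix: request a redraw right away
assert node.redraws == 1
```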
2,015
118
gh_patches_debug_31555
rasdani/github-patches
git_diff
chainer__chainer-970
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- SplitAxis should return a tuple with a single element `Function.__call__` returns a `Variable` when its forward function returns only one return value. When the number of return values depends on input values, such as `split_axis`, a user need to check the type of the return value of `Function`. A user needs to write a complicated source code. I have two design choice: - Only `SplitAxis` always returns a tuple of `Variable`s - Add keyword argument `force_tuple` to `Function.__call__` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `chainer/functions/array/split_axis.py` Content: ``` 1 import collections 2 3 import six 4 5 from chainer import cuda 6 from chainer import function 7 from chainer.utils import type_check 8 9 10 class SplitAxis(function.Function): 11 12 """Function that splits multiple arrays towards the specified axis.""" 13 14 def __init__(self, indices_or_sections, axis): 15 if not isinstance(indices_or_sections, (int, collections.Iterable)): 16 raise TypeError('indices_or_sections must be integer or 1-D array') 17 self.indices_or_sections = indices_or_sections 18 self.axis = axis 19 20 def check_type_forward(self, in_types): 21 type_check.expect(in_types.size() == 1) 22 type_check.expect(in_types[0].ndim > self.axis) 23 24 if isinstance(self.indices_or_sections, collections.Iterable): 25 max_index = type_check.Variable( 26 self.indices_or_sections[-1], 'max_index') 27 type_check.expect(in_types[0].shape[self.axis] > max_index) 28 else: 29 sections = type_check.Variable( 30 self.indices_or_sections, 'sections') 31 type_check.expect(in_types[0].shape[self.axis] % sections == 0) 32 33 def forward(self, x): 34 if isinstance(self.indices_or_sections, collections.Iterable): 35 cdimx = x[0].shape[self.axis] 36 ind = list(self.indices_or_sections) 37 ind.append(cdimx) 38 prev_i = 0 39 for i in ind: 40 cdimy = max(0, min(i, cdimx) - prev_i) 41 if cdimy == 0: 42 raise ValueError('Not support if shape contains 0') 43 prev_i = i 44 xp = cuda.get_array_module(*x) 45 return tuple(xp.split(x[0], self.indices_or_sections, self.axis)) 46 47 def backward(self, x, gys): 48 xp = cuda.get_array_module(*x) 49 if any(gy is None for gy in gys): 50 gx = xp.zeros_like(x[0]) 51 gxs = xp.split(gx, self.indices_or_sections, self.axis) 52 for gxi, gy in six.moves.zip(gxs, gys): 53 if gy is None: 54 continue 55 gxi[:] = gy 56 return gx, 57 else: 58 return xp.concatenate(gys, axis=self.axis), 59 60 61 def split_axis(x, indices_or_sections, axis): 62 """Splits given variables along an axis. 63 64 Args: 65 x (tuple of Variables): Variables to be split. 66 indices_or_sections (int or 1-D array): If this argument is an integer, 67 N, the array will be divided into N equal arrays along axis. 68 If it is a 1-D array of sorted integers, it 69 indicates the positions where the array is split. 70 axis (int): Axis that the input array is split along. 71 72 Returns: 73 tuple or Variable: Tuple of :class:`~chainer.Variable` objects 74 if the number of outputs is more than 1 or 75 :class:`~chainer.Variable` otherwise. 76 77 .. note:: 78 This function raises :class:`ValueError` if at least 79 one of the outputs is split to zero-size 80 (i.e. `axis`-th value of its shape is zero). 
81 82 """ 83 return SplitAxis(indices_or_sections, axis)(x) 84 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/chainer/functions/array/split_axis.py b/chainer/functions/array/split_axis.py --- a/chainer/functions/array/split_axis.py +++ b/chainer/functions/array/split_axis.py @@ -2,6 +2,7 @@ import six +import chainer from chainer import cuda from chainer import function from chainer.utils import type_check @@ -58,7 +59,7 @@ return xp.concatenate(gys, axis=self.axis), -def split_axis(x, indices_or_sections, axis): +def split_axis(x, indices_or_sections, axis, force_tuple=False): """Splits given variables along an axis. Args: @@ -68,16 +69,23 @@ If it is a 1-D array of sorted integers, it indicates the positions where the array is split. axis (int): Axis that the input array is split along. + force_tuple (bool): If ``True``, this method returns a tuple even when + the number of outputs is one. Returns: tuple or Variable: Tuple of :class:`~chainer.Variable` objects if the number of outputs is more than 1 or :class:`~chainer.Variable` otherwise. + When ``force_tuple`` is ``True``, returned value is always a tuple + regardless of the number of outputs. .. note:: This function raises :class:`ValueError` if at least one of the outputs is split to zero-size - (i.e. `axis`-th value of its shape is zero). + (i.e. ``axis``-th value of its shape is zero). """ - return SplitAxis(indices_or_sections, axis)(x) + res = SplitAxis(indices_or_sections, axis)(x) + if force_tuple and isinstance(res, chainer.Variable): + res = (res,) + return res
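A hedged usage sketch of the patched signature, showing the issue's second design option in action (assumes the patch above is applied):

```python
# Usage sketch for the patched chainer.functions.split_axis signature.
import numpy as np
import chainer
import chainer.functions as F

x = chainer.Variable(np.arange(6, dtype=np.float32).reshape(2, 3))

ys = F.split_axis(x, 1, axis=0)                    # single output: bare Variable
zs = F.split_axis(x, 1, axis=0, force_tuple=True)  # single output: 1-tuple

assert isinstance(ys, chainer.Variable)
assert isinstance(zs, tuple) and len(zs) == 1
```

Keeping the default as `force_tuple=False` preserves backward compatibility while letting callers opt in to a stable return type.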
1,233
420
gh_patches_debug_920
rasdani/github-patches
git_diff
speechbrain__speechbrain-1127
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Broken docs for `speechbrain.alignment.ctc_segmentation` Hi, thanks for maintaining such a wonderful library. Looks like the documentation for `speechbrain.alignment.ctc_segmentation` is broken: https://speechbrain.readthedocs.io/en/latest/API/speechbrain.alignment.ctc_segmentation.html I guess this is caused by unneeded shebang, as shown in the following: https://github.com/speechbrain/speechbrain/blob/develop/speechbrain/alignment/ctc_segmentation.py#L1-L2 Perhaps this could be related to #819 ? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/conf.py` Content: ``` 1 # Configuration file for the Sphinx documentation builder. 2 # 3 # This file only contains a selection of the most common options. For a full 4 # list see the documentation: 5 # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 7 # -- Path setup -------------------------------------------------------------- 8 9 # If extensions (or modules to document with autodoc) are in another directory, 10 # add these directories to sys.path here. If the directory is relative to the 11 # documentation root, use os.path.abspath to make it absolute, like shown here. 12 # 13 import os 14 import sys 15 import hyperpyyaml 16 17 18 sys.path.insert(0, os.path.abspath("../speechbrain")) 19 20 21 # -- Project information ----------------------------------------------------- 22 23 project = "SpeechBrain" 24 copyright = "2021, SpeechBrain" 25 author = "SpeechBrain" 26 27 # The full version, including alpha/beta/rc tags 28 release = "0.5.0" 29 30 31 # -- General configuration --------------------------------------------------- 32 33 # Add any Sphinx extension module names here, as strings. They can be 34 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 35 # ones. 36 extensions = [ 37 "sphinx.ext.autodoc", 38 "sphinx.ext.intersphinx", 39 "sphinx.ext.mathjax", 40 "sphinx.ext.viewcode", 41 "sphinx.ext.autosummary", 42 "sphinx.ext.napoleon", 43 "recommonmark", 44 ] 45 46 47 # Napoleon settings 48 napoleon_google_docstring = False 49 napoleon_numpy_docstring = True 50 napoleon_include_init_with_doc = True 51 napoleon_include_private_with_doc = False 52 napoleon_include_special_with_doc = True 53 napoleon_use_admonition_for_examples = False 54 napoleon_use_admonition_for_notes = True 55 napoleon_use_admonition_for_references = False 56 napoleon_use_ivar = False 57 napoleon_use_param = True 58 napoleon_use_rtype = True 59 60 # Intersphinx mapping: 61 intersphinx_mapping = { 62 "python": ("https://docs.python.org/", None), 63 "numpy": ("http://docs.scipy.org/doc/numpy/", None), 64 "torch": ("https://pytorch.org/docs/master/", None), 65 } 66 67 # AUTODOC: 68 69 autodoc_default_options = {} 70 71 # Autodoc mock extra dependencies: 72 autodoc_mock_imports = ["numba", "sklearn"] 73 74 # Order of API items: 75 autodoc_member_order = "bysource" 76 autodoc_default_options = {"member-order": "bysource"} 77 78 # Don't show inherited docstrings: 79 autodoc_inherit_docstrings = False 80 81 # Add any paths that contain templates here, relative to this directory. 82 templates_path = ["_templates"] 83 84 # List of patterns, relative to source directory, that match files and 85 # directories to ignore when looking for source files. 86 # This pattern also affects html_static_path and html_extra_path. 
87 exclude_patterns = ["_apidoc_templates"] 88 89 # -- Better apidoc ----------------------------------------------------------- 90 91 92 def run_apidoc(app): 93 """Generage API documentation""" 94 import better_apidoc 95 96 better_apidoc.APP = app 97 98 better_apidoc.main( 99 [ 100 "better-apidoc", 101 "-t", 102 "_apidoc_templates", 103 "--force", 104 "--no-toc", 105 "--separate", 106 "-o", 107 "API", 108 os.path.dirname(hyperpyyaml.__file__), 109 ] 110 ) 111 better_apidoc.main( 112 [ 113 "better-apidoc", 114 "-t", 115 "_apidoc_templates", 116 "--force", 117 "--no-toc", 118 "--separate", 119 "-o", 120 "API", 121 os.path.join("../", "speechbrain"), 122 ] 123 ) 124 125 126 # -- Options for HTML output ------------------------------------------------- 127 128 # The theme to use for HTML and HTML Help pages. See the documentation for 129 # a list of builtin themes. 130 # 131 html_theme = "sphinx_rtd_theme" 132 # See https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html 133 # for rtd theme options 134 html_theme_options = { 135 # Toc options 136 "collapse_navigation": False, 137 "sticky_navigation": True, 138 "navigation_depth": 4, 139 "includehidden": True, 140 } 141 142 143 # Add any paths that contain custom static files (such as style sheets) here, 144 # relative to this directory. They are copied after the builtin static files, 145 # so a file named "default.css" will overwrite the builtin "default.css". 146 html_static_path = ["_static"] 147 148 source_suffix = { 149 ".rst": "restructuredtext", 150 ".txt": "markdown", 151 ".md": "markdown", 152 } 153 154 155 def setup(app): 156 app.connect("builder-inited", run_apidoc) 157 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -69,7 +69,7 @@ autodoc_default_options = {} # Autodoc mock extra dependencies: -autodoc_mock_imports = ["numba", "sklearn"] +autodoc_mock_imports = ["sklearn"] # Order of API items: autodoc_member_order = "bysource"
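Why dropping `numba` from the mock list fixes the page is an inference from the patch (the issue itself suspects the shebang): autodoc substitutes mock objects for mocked modules, so a decorator like `@numba.jit` applied at import time replaces the decorated function with a mock, and nothing is left for the docs to render. A toy illustration of that failure mode, with purely illustrative names:

```python
# Toy reproduction of the mocked-decorator problem; names are illustrative.
class MockObject:
    """Stand-in for the object autodoc substitutes for mocked imports."""
    def __call__(self, *args, **kwargs):
        return MockObject()
    def __getattr__(self, name):
        return MockObject()

numba = MockObject()  # roughly what autodoc_mock_imports = ["numba"] does

@numba.jit(nopython=True)  # the decorator call returns a MockObject...
def ctc_align(x):
    """...so this docstring never reaches the generated docs."""
    return x

print(type(ctc_align).__name__)  # MockObject, not a documented function
```

Presumably `numba` is installable in the docs build environment, so mocking it is unnecessary and removing it lets the real decorators run.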
1,751
97
gh_patches_debug_6953
rasdani/github-patches
git_diff
svthalia__concrexit-2526
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Vacancies without partner page on homepage lead to 404 errors ### Describe the bug The vacancy view on the homepage leads to a 404 for vacancies without partner pages, as it tries to go to that page anyway. ### How to reproduce Steps to reproduce the behaviour: 1. Create an inactive partner 2. Create a vacancy with them 3. Make it appear on the homepage 4. Click it ### Expected behaviour Going to the vacancy in some way, e.g. on the general vacancy page. ### Screenshots If applicable, add screenshots to help explain your problem. ### Additional context I think I sort of fixed this for the main vacancy list, so we can probably share the code there. (Although I believe that that is currently broken too.) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `website/partners/templatetags/frontpage_vacancies.py` Content: ``` 1 from django import template 2 from django.urls import reverse 3 4 from partners.models import Vacancy 5 6 register = template.Library() 7 8 9 @register.inclusion_tag("partners/frontpage_vacancies.html") 10 def render_frontpage_vacancies(): 11 vacancies = [] 12 13 for vacancy in Vacancy.objects.order_by("?")[:6]: 14 url = "{}#vacancy-{}".format(reverse("partners:vacancies"), vacancy.id) 15 if vacancy.partner: 16 url = "{}#vacancy-{}".format(vacancy.partner.get_absolute_url(), vacancy.id) 17 18 vacancies.append( 19 { 20 "title": vacancy.title, 21 "company_name": vacancy.get_company_name(), 22 "url": url, 23 } 24 ) 25 26 return {"vacancies": vacancies} 27 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/website/partners/templatetags/frontpage_vacancies.py b/website/partners/templatetags/frontpage_vacancies.py --- a/website/partners/templatetags/frontpage_vacancies.py +++ b/website/partners/templatetags/frontpage_vacancies.py @@ -12,7 +12,7 @@ for vacancy in Vacancy.objects.order_by("?")[:6]: url = "{}#vacancy-{}".format(reverse("partners:vacancies"), vacancy.id) - if vacancy.partner: + if vacancy.partner and vacancy.partner.is_active: url = "{}#vacancy-{}".format(vacancy.partner.get_absolute_url(), vacancy.id) vacancies.append(
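A hedged regression-test sketch for the fix; the `Partner` fields and pytest-django setup here are assumptions, not the project's actual factories:

```python
# Hypothetical regression test; model field names are assumptions.
import pytest

from partners.models import Partner, Vacancy
from partners.templatetags.frontpage_vacancies import render_frontpage_vacancies

@pytest.mark.django_db
def test_inactive_partner_vacancy_links_to_overview():
    partner = Partner.objects.create(name="Acme", slug="acme", is_active=False)
    Vacancy.objects.create(title="Intern", partner=partner)

    vacancies = render_frontpage_vacancies()["vacancies"]

    # An inactive partner has no detail page, so the homepage link must
    # fall back to the generic vacancies overview anchor instead of a 404.
    assert all("#vacancy-" in v["url"] for v in vacancies)
    assert not any("acme" in v["url"] for v in vacancies)
```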
634
162
gh_patches_debug_42930
rasdani/github-patches
git_diff
easybuilders__easybuild-easyblocks-2416
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Tkinter easyblock does not work with `--module-only` ``` == sanity checking... ERROR: Traceback (most recent call last): File "/project/def-sponsor00/ocaisa/easybuild/software/EasyBuild/4.3.4/lib/python3.8/site-packages/easybuild/main.py", line 117, in build_and_install_software (ec_res['success'], app_log, err) = build_and_install_one(ec, init_env) File "/project/def-sponsor00/ocaisa/easybuild/software/EasyBuild/4.3.4/lib/python3.8/site-packages/easybuild/framework/easyblock.py", line 3412, in build_and_install_one result = app.run_all_steps(run_test_cases=run_test_cases) File "/project/def-sponsor00/ocaisa/easybuild/software/EasyBuild/4.3.4/lib/python3.8/site-packages/easybuild/framework/easyblock.py", line 3311, in run_all_steps self.run_step(step_name, step_methods) File "/project/def-sponsor00/ocaisa/easybuild/software/EasyBuild/4.3.4/lib/python3.8/site-packages/easybuild/framework/easyblock.py", line 3166, in run_step step_method(self)() File "/project/def-sponsor00/ocaisa/easybuild/software/EasyBuild/4.3.4/lib/python3.8/site-packages/easybuild/easyblocks/t/tkinter.py", line 101, in sanity_check_step 'files': [os.path.join(os.path.dirname(det_pylibdir()), self.tkinter_so_basename)], AttributeError: 'EB_Tkinter' object has no attribute 'tkinter_so_basename' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `easybuild/easyblocks/t/tkinter.py` Content: ``` 1 ## 2 # Copyright 2009-2021 Ghent University 3 # 4 # This file is part of EasyBuild, 5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), 6 # with support of Ghent University (http://ugent.be/hpc), 7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), 8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en) 9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). 10 # 11 # https://github.com/easybuilders/easybuild 12 # 13 # EasyBuild is free software: you can redistribute it and/or modify 14 # it under the terms of the GNU General Public License as published by 15 # the Free Software Foundation v2. 16 # 17 # EasyBuild is distributed in the hope that it will be useful, 18 # but WITHOUT ANY WARRANTY; without even the implied warranty of 19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 # GNU General Public License for more details. 21 # 22 # You should have received a copy of the GNU General Public License 23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. 24 ## 25 """ 26 EasyBuild support for building and installing Tkinter. This is the Python core 27 module to use Tcl/Tk. 
28 29 @author: Adam Huffman (The Francis Crick Institute) 30 @author: Ward Poelmans (Free University of Brussels) 31 @author: Kenneth Hoste (HPC-UGent) 32 """ 33 import glob 34 import os 35 import tempfile 36 from distutils.version import LooseVersion 37 38 import easybuild.tools.environment as env 39 from easybuild.easyblocks.generic.pythonpackage import det_pylibdir 40 from easybuild.easyblocks.python import EB_Python 41 from easybuild.tools.build_log import EasyBuildError 42 from easybuild.tools.filetools import copy, move_file, remove_dir 43 from easybuild.tools.modules import get_software_root 44 from easybuild.tools.systemtools import get_shared_lib_ext 45 46 47 class EB_Tkinter(EB_Python): 48 """Support for building/installing the Python Tkinter module 49 based on the normal Python module. We build a normal python 50 but only install the Tkinter bits. 51 """ 52 53 def configure_step(self): 54 """Check for Tk before configuring""" 55 tk = get_software_root('Tk') 56 if not tk: 57 raise EasyBuildError("Tk is mandatory to build Tkinter") 58 59 # avoid that pip (ab)uses $HOME/.cache/pip 60 # cfr. https://pip.pypa.io/en/stable/reference/pip_install/#caching 61 env.setvar('XDG_CACHE_HOME', tempfile.gettempdir()) 62 self.log.info("Using %s as pip cache directory", os.environ['XDG_CACHE_HOME']) 63 64 super(EB_Tkinter, self).configure_step() 65 66 def install_step(self): 67 """Install python but only keep the bits we need""" 68 super(EB_Tkinter, self).install_step() 69 70 tmpdir = tempfile.mkdtemp(dir=self.builddir) 71 72 pylibdir = os.path.join(self.installdir, os.path.dirname(det_pylibdir())) 73 shlib_ext = get_shared_lib_ext() 74 tkinter_so = os.path.join(pylibdir, 'lib-dynload', '_tkinter*.' + shlib_ext) 75 tkinter_so_hits = glob.glob(tkinter_so) 76 if len(tkinter_so_hits) != 1: 77 raise EasyBuildError("Expected to find exactly one _tkinter*.so: %s", tkinter_so_hits) 78 self.tkinter_so_basename = os.path.basename(tkinter_so_hits[0]) 79 if LooseVersion(self.version) >= LooseVersion('3'): 80 tkparts = ["tkinter", os.path.join("lib-dynload", self.tkinter_so_basename)] 81 else: 82 tkparts = ["lib-tk", os.path.join("lib-dynload", self.tkinter_so_basename)] 83 84 copy([os.path.join(pylibdir, x) for x in tkparts], tmpdir) 85 86 remove_dir(self.installdir) 87 88 move_file(os.path.join(tmpdir, tkparts[0]), os.path.join(pylibdir, tkparts[0])) 89 tkinter_so = os.path.basename(tkparts[1]) 90 move_file(os.path.join(tmpdir, tkinter_so), os.path.join(pylibdir, tkinter_so)) 91 92 def sanity_check_step(self): 93 """Custom sanity check for Python.""" 94 if LooseVersion(self.version) >= LooseVersion('3'): 95 tkinter = 'tkinter' 96 else: 97 tkinter = 'Tkinter' 98 custom_commands = ["python -c 'import %s'" % tkinter] 99 100 custom_paths = { 101 'files': [os.path.join(os.path.dirname(det_pylibdir()), self.tkinter_so_basename)], 102 'dirs': ['lib'] 103 } 104 super(EB_Python, self).sanity_check_step(custom_commands=custom_commands, custom_paths=custom_paths) 105 106 def make_module_extra(self): 107 """Set PYTHONPATH""" 108 txt = super(EB_Tkinter, self).make_module_extra() 109 pylibdir = os.path.dirname(det_pylibdir()) 110 txt += self.module_generator.prepend_paths('PYTHONPATH', pylibdir) 111 112 return txt 113 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/easybuild/easyblocks/t/tkinter.py b/easybuild/easyblocks/t/tkinter.py --- a/easybuild/easyblocks/t/tkinter.py +++ b/easybuild/easyblocks/t/tkinter.py @@ -39,6 +39,7 @@ from easybuild.easyblocks.generic.pythonpackage import det_pylibdir from easybuild.easyblocks.python import EB_Python from easybuild.tools.build_log import EasyBuildError +from easybuild.tools.config import build_option from easybuild.tools.filetools import copy, move_file, remove_dir from easybuild.tools.modules import get_software_root from easybuild.tools.systemtools import get_shared_lib_ext @@ -50,6 +51,11 @@ but only install the Tkinter bits. """ + def __init__(self, *args, **kwargs): + """Initialize Tkinter-specific variables.""" + super(EB_Tkinter, self).__init__(*args, **kwargs) + self.tkinter_so_basename = '' + def configure_step(self): """Check for Tk before configuring""" tk = get_software_root('Tk') @@ -69,18 +75,14 @@ tmpdir = tempfile.mkdtemp(dir=self.builddir) - pylibdir = os.path.join(self.installdir, os.path.dirname(det_pylibdir())) - shlib_ext = get_shared_lib_ext() - tkinter_so = os.path.join(pylibdir, 'lib-dynload', '_tkinter*.' + shlib_ext) - tkinter_so_hits = glob.glob(tkinter_so) - if len(tkinter_so_hits) != 1: - raise EasyBuildError("Expected to find exactly one _tkinter*.so: %s", tkinter_so_hits) - self.tkinter_so_basename = os.path.basename(tkinter_so_hits[0]) + if not self.tkinter_so_basename: + self.tkinter_so_basename = self.get_tkinter_so_basename() if LooseVersion(self.version) >= LooseVersion('3'): tkparts = ["tkinter", os.path.join("lib-dynload", self.tkinter_so_basename)] else: tkparts = ["lib-tk", os.path.join("lib-dynload", self.tkinter_so_basename)] + pylibdir = os.path.join(self.installdir, os.path.dirname(det_pylibdir())) copy([os.path.join(pylibdir, x) for x in tkparts], tmpdir) remove_dir(self.installdir) @@ -89,6 +91,21 @@ tkinter_so = os.path.basename(tkparts[1]) move_file(os.path.join(tmpdir, tkinter_so), os.path.join(pylibdir, tkinter_so)) + def get_tkinter_so_basename(self): + pylibdir = os.path.join(self.installdir, os.path.dirname(det_pylibdir())) + shlib_ext = get_shared_lib_ext() + if build_option('module_only'): + # The build has already taken place so the file will have been moved into the final pylibdir + tkinter_so = os.path.join(pylibdir, '_tkinter*.' + shlib_ext) + else: + tkinter_so = os.path.join(pylibdir, 'lib-dynload', '_tkinter*.' + shlib_ext) + tkinter_so_hits = glob.glob(tkinter_so) + if len(tkinter_so_hits) != 1: + raise EasyBuildError("Expected to find exactly one _tkinter*.so: %s", tkinter_so_hits) + tkinter_so_basename = os.path.basename(tkinter_so_hits[0]) + + return tkinter_so_basename + def sanity_check_step(self): """Custom sanity check for Python.""" if LooseVersion(self.version) >= LooseVersion('3'): @@ -97,6 +114,9 @@ tkinter = 'Tkinter' custom_commands = ["python -c 'import %s'" % tkinter] + if not self.tkinter_so_basename: + self.tkinter_so_basename = self.get_tkinter_so_basename() + custom_paths = { 'files': [os.path.join(os.path.dirname(det_pylibdir()), self.tkinter_so_basename)], 'dirs': ['lib']
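The patch is essentially lazy initialization: `__init__` seeds a sentinel, and the basename is resolved on first use, so a `--module-only` run that skips `install_step` can still compute it during the sanity check, looking in the final `pylibdir` where a previous build left the file. The same pattern in isolation (a hypothetical class, not the EasyBlock API):

```python
# Lazy-resolution sketch; the class and paths are illustrative, not EasyBuild API.
import glob
import os

class TkinterArtifact:
    def __init__(self, pylibdir):
        self.pylibdir = pylibdir
        self._so_basename = ''  # sentinel: not resolved yet

    def so_basename(self, module_only=False):
        if not self._so_basename:
            # Module-only runs look in the final location; fresh builds look
            # in the staging 'lib-dynload' directory, mirroring the patch.
            subdir = '' if module_only else 'lib-dynload'
            hits = glob.glob(os.path.join(self.pylibdir, subdir, '_tkinter*.so'))
            if len(hits) != 1:
                raise RuntimeError("expected exactly one _tkinter*.so, got %s" % hits)
            self._so_basename = os.path.basename(hits[0])
        return self._so_basename
```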
https://pip.pypa.io/en/stable/reference/pip_install/#caching\n env.setvar('XDG_CACHE_HOME', tempfile.gettempdir())\n self.log.info(\"Using %s as pip cache directory\", os.environ['XDG_CACHE_HOME'])\n\n super(EB_Tkinter, self).configure_step()\n\n def install_step(self):\n \"\"\"Install python but only keep the bits we need\"\"\"\n super(EB_Tkinter, self).install_step()\n\n tmpdir = tempfile.mkdtemp(dir=self.builddir)\n\n if not self.tkinter_so_basename:\n self.tkinter_so_basename = self.get_tkinter_so_basename()\n if LooseVersion(self.version) >= LooseVersion('3'):\n tkparts = [\"tkinter\", os.path.join(\"lib-dynload\", self.tkinter_so_basename)]\n else:\n tkparts = [\"lib-tk\", os.path.join(\"lib-dynload\", self.tkinter_so_basename)]\n\n pylibdir = os.path.join(self.installdir, os.path.dirname(det_pylibdir()))\n copy([os.path.join(pylibdir, x) for x in tkparts], tmpdir)\n\n remove_dir(self.installdir)\n\n move_file(os.path.join(tmpdir, tkparts[0]), os.path.join(pylibdir, tkparts[0]))\n tkinter_so = os.path.basename(tkparts[1])\n move_file(os.path.join(tmpdir, tkinter_so), os.path.join(pylibdir, tkinter_so))\n\n def get_tkinter_so_basename(self):\n pylibdir = os.path.join(self.installdir, os.path.dirname(det_pylibdir()))\n shlib_ext = get_shared_lib_ext()\n if build_option('module_only'):\n # The build has already taken place so the file will have been moved into the final pylibdir\n tkinter_so = os.path.join(pylibdir, '_tkinter*.' + shlib_ext)\n else:\n tkinter_so = os.path.join(pylibdir, 'lib-dynload', '_tkinter*.' + shlib_ext)\n tkinter_so_hits = glob.glob(tkinter_so)\n if len(tkinter_so_hits) != 1:\n raise EasyBuildError(\"Expected to find exactly one _tkinter*.so: %s\", tkinter_so_hits)\n tkinter_so_basename = os.path.basename(tkinter_so_hits[0])\n\n return tkinter_so_basename\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for Python.\"\"\"\n if LooseVersion(self.version) >= LooseVersion('3'):\n tkinter = 'tkinter'\n else:\n tkinter = 'Tkinter'\n custom_commands = [\"python -c 'import %s'\" % tkinter]\n\n if not self.tkinter_so_basename:\n self.tkinter_so_basename = self.get_tkinter_so_basename()\n\n custom_paths = {\n 'files': [os.path.join(os.path.dirname(det_pylibdir()), self.tkinter_so_basename)],\n 'dirs': ['lib']\n }\n super(EB_Python, self).sanity_check_step(custom_commands=custom_commands, custom_paths=custom_paths)\n\n def make_module_extra(self):\n \"\"\"Set PYTHONPATH\"\"\"\n txt = super(EB_Tkinter, self).make_module_extra()\n pylibdir = os.path.dirname(det_pylibdir())\n txt += self.module_generator.prepend_paths('PYTHONPATH', pylibdir)\n\n return txt\n", "path": "easybuild/easyblocks/t/tkinter.py"}]}
2,028
937
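Illustrative aside on the record above, not part of the dataset row: the golden diff defers the `_tkinter*.so` lookup into a helper whose search directory depends on whether a `module_only` run is in progress. A minimal standalone sketch of that lookup, with hypothetical arguments and a plain `RuntimeError` standing in for `EasyBuildError`:

```python
import glob
import os


def find_tkinter_so_basename(pylibdir, shlib_ext, module_only):
    """Locate exactly one _tkinter*.so and return its basename."""
    if module_only:
        # Build already ran: the .so was moved into the final pylibdir.
        pattern = os.path.join(pylibdir, '_tkinter*.' + shlib_ext)
    else:
        # Fresh build: the .so still lives under lib-dynload.
        pattern = os.path.join(pylibdir, 'lib-dynload', '_tkinter*.' + shlib_ext)
    hits = glob.glob(pattern)
    if len(hits) != 1:
        raise RuntimeError('Expected exactly one _tkinter*.so: %s' % hits)
    return os.path.basename(hits[0])
```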
gh_patches_debug_38534
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-2338
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spider sunoco is broken During the global build at 2021-07-21-14-42-39, spider **sunoco** failed with **0 features** and **1 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/logs/sunoco.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/sunoco.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/sunoco.geojson)) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/sunoco.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import scrapy 3 import json 4 5 from locations.items import GeojsonPointItem 6 from locations.hours import OpeningHours 7 8 9 class SunocoSpider(scrapy.Spider): 10 name = "sunoco" 11 item_attributes = {'brand': "Sunoco", 'brand_wikidata': "Q1423218"} 12 allowed_domains = ["sunoco.com"] 13 14 start_urls = ['https://www.sunoco.com/js/locations.json'] 15 16 def parse(self, response): 17 for location in json.loads(response.body_as_unicode()): 18 opening_hours = OpeningHours() 19 20 for key in [ 21 'Hrs of Operation Mon-Sat Open', 22 'Hrs of Operation Mon-Sat Close', 23 'Hrs of Operation Sun Open', 24 'Hrs of Operation Sun Close' 25 ]: 26 if location[key] >= 2400: 27 location[key] -= 2400 28 29 for day in ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']: 30 opening_hours.add_range(day=day, 31 open_time=f"{location['Hrs of Operation Mon-Sat Open']:04d}", 32 close_time=f"{location['Hrs of Operation Mon-Sat Close']:04d}", 33 time_format='%H%M') 34 35 opening_hours.add_range(day='Su', 36 open_time=f"{location['Hrs of Operation Sun Open']:04d}", 37 close_time=f"{location['Hrs of Operation Sun Close']:04d}", 38 time_format='%H%M') 39 40 yield GeojsonPointItem( 41 ref=location['Facility ID'], 42 lon=location['Longitude'], 43 lat=location['Latitude'], 44 # name as shown on the Sunoco site 45 name=f"Sunoco #{location['Facility ID']}", 46 addr_full=location['Address'], 47 city=location['City'], 48 state=location['State'], 49 postcode=location['Zip'], 50 country='US', 51 phone=location['Phone'], 52 opening_hours=opening_hours.as_opening_hours(), 53 extras={ 54 'amenity:fuel': True, 55 'atm': int(location['ATM'] or 0) == 1, 56 'car_wash': int(location['Car Wash'] or 0) == 1, 57 'fuel:diesel': int(location['Diesel'] or 0) == 1, 58 'fuel:kerosene': int(location['Kerosene'] or 0) == 1 59 } 60 ) 61 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/sunoco.py b/locations/spiders/sunoco.py --- a/locations/spiders/sunoco.py +++ b/locations/spiders/sunoco.py @@ -17,44 +17,34 @@ for location in json.loads(response.body_as_unicode()): opening_hours = OpeningHours() - for key in [ - 'Hrs of Operation Mon-Sat Open', - 'Hrs of Operation Mon-Sat Close', - 'Hrs of Operation Sun Open', - 'Hrs of Operation Sun Close' - ]: - if location[key] >= 2400: - location[key] -= 2400 - - for day in ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']: - opening_hours.add_range(day=day, - open_time=f"{location['Hrs of Operation Mon-Sat Open']:04d}", - close_time=f"{location['Hrs of Operation Mon-Sat Close']:04d}", - time_format='%H%M') - - opening_hours.add_range(day='Su', - open_time=f"{location['Hrs of Operation Sun Open']:04d}", - close_time=f"{location['Hrs of Operation Sun Close']:04d}", - time_format='%H%M') + for key, val in location.items(): + if not key.endswith('_Hours'): + continue + day = key[:2].capitalize() + if val == '24 hours': + open_time = close_time = '12 AM' + else: + open_time, close_time = val.split(' to ') + opening_hours.add_range(day, open_time, close_time, '%I %p') yield GeojsonPointItem( - ref=location['Facility ID'], + ref=location['Store_ID'], lon=location['Longitude'], lat=location['Latitude'], # name as shown on the Sunoco site - name=f"Sunoco #{location['Facility ID']}", - addr_full=location['Address'], + name=f"Sunoco #{location['Store_ID']}", + addr_full=location['Street_Address'], city=location['City'], state=location['State'], - postcode=location['Zip'], + postcode=location['Postalcode'], country='US', phone=location['Phone'], opening_hours=opening_hours.as_opening_hours(), extras={ 'amenity:fuel': True, - 'atm': int(location['ATM'] or 0) == 1, - 'car_wash': int(location['Car Wash'] or 0) == 1, - 'fuel:diesel': int(location['Diesel'] or 0) == 1, - 'fuel:kerosene': int(location['Kerosene'] or 0) == 1 + 'atm': location['ATM'] == 'Y', + 'car_wash': location['CarWash'], + 'fuel:diesel': location['HasDiesel'] == 'Y', + 'fuel:kerosene': location['HasKero'] == 'Y' } )
{"golden_diff": "diff --git a/locations/spiders/sunoco.py b/locations/spiders/sunoco.py\n--- a/locations/spiders/sunoco.py\n+++ b/locations/spiders/sunoco.py\n@@ -17,44 +17,34 @@\n for location in json.loads(response.body_as_unicode()):\n opening_hours = OpeningHours()\n \n- for key in [\n- 'Hrs of Operation Mon-Sat Open',\n- 'Hrs of Operation Mon-Sat Close',\n- 'Hrs of Operation Sun Open',\n- 'Hrs of Operation Sun Close'\n- ]:\n- if location[key] >= 2400:\n- location[key] -= 2400\n-\n- for day in ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']:\n- opening_hours.add_range(day=day,\n- open_time=f\"{location['Hrs of Operation Mon-Sat Open']:04d}\",\n- close_time=f\"{location['Hrs of Operation Mon-Sat Close']:04d}\",\n- time_format='%H%M')\n-\n- opening_hours.add_range(day='Su',\n- open_time=f\"{location['Hrs of Operation Sun Open']:04d}\",\n- close_time=f\"{location['Hrs of Operation Sun Close']:04d}\",\n- time_format='%H%M')\n+ for key, val in location.items():\n+ if not key.endswith('_Hours'):\n+ continue\n+ day = key[:2].capitalize()\n+ if val == '24 hours':\n+ open_time = close_time = '12 AM'\n+ else:\n+ open_time, close_time = val.split(' to ')\n+ opening_hours.add_range(day, open_time, close_time, '%I %p')\n \n yield GeojsonPointItem(\n- ref=location['Facility ID'],\n+ ref=location['Store_ID'],\n lon=location['Longitude'],\n lat=location['Latitude'],\n # name as shown on the Sunoco site\n- name=f\"Sunoco #{location['Facility ID']}\",\n- addr_full=location['Address'],\n+ name=f\"Sunoco #{location['Store_ID']}\",\n+ addr_full=location['Street_Address'],\n city=location['City'],\n state=location['State'],\n- postcode=location['Zip'],\n+ postcode=location['Postalcode'],\n country='US',\n phone=location['Phone'],\n opening_hours=opening_hours.as_opening_hours(),\n extras={\n 'amenity:fuel': True,\n- 'atm': int(location['ATM'] or 0) == 1,\n- 'car_wash': int(location['Car Wash'] or 0) == 1,\n- 'fuel:diesel': int(location['Diesel'] or 0) == 1,\n- 'fuel:kerosene': int(location['Kerosene'] or 0) == 1\n+ 'atm': location['ATM'] == 'Y',\n+ 'car_wash': location['CarWash'],\n+ 'fuel:diesel': location['HasDiesel'] == 'Y',\n+ 'fuel:kerosene': location['HasKero'] == 'Y'\n }\n )\n", "issue": "Spider sunoco is broken\nDuring the global build at 2021-07-21-14-42-39, spider **sunoco** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/logs/sunoco.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/sunoco.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/sunoco.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass SunocoSpider(scrapy.Spider):\n name = \"sunoco\"\n item_attributes = {'brand': \"Sunoco\", 'brand_wikidata': \"Q1423218\"}\n allowed_domains = [\"sunoco.com\"]\n\n start_urls = ['https://www.sunoco.com/js/locations.json']\n\n def parse(self, response):\n for location in json.loads(response.body_as_unicode()):\n opening_hours = OpeningHours()\n\n for key in [\n 'Hrs of Operation Mon-Sat Open',\n 'Hrs of Operation Mon-Sat Close',\n 'Hrs of Operation Sun Open',\n 'Hrs of Operation Sun Close'\n ]:\n if location[key] >= 2400:\n location[key] -= 2400\n\n for day in ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']:\n opening_hours.add_range(day=day,\n open_time=f\"{location['Hrs of Operation Mon-Sat 
Open']:04d}\",\n close_time=f\"{location['Hrs of Operation Mon-Sat Close']:04d}\",\n time_format='%H%M')\n\n opening_hours.add_range(day='Su',\n open_time=f\"{location['Hrs of Operation Sun Open']:04d}\",\n close_time=f\"{location['Hrs of Operation Sun Close']:04d}\",\n time_format='%H%M')\n\n yield GeojsonPointItem(\n ref=location['Facility ID'],\n lon=location['Longitude'],\n lat=location['Latitude'],\n # name as shown on the Sunoco site\n name=f\"Sunoco #{location['Facility ID']}\",\n addr_full=location['Address'],\n city=location['City'],\n state=location['State'],\n postcode=location['Zip'],\n country='US',\n phone=location['Phone'],\n opening_hours=opening_hours.as_opening_hours(),\n extras={\n 'amenity:fuel': True,\n 'atm': int(location['ATM'] or 0) == 1,\n 'car_wash': int(location['Car Wash'] or 0) == 1,\n 'fuel:diesel': int(location['Diesel'] or 0) == 1,\n 'fuel:kerosene': int(location['Kerosene'] or 0) == 1\n }\n )\n", "path": "locations/spiders/sunoco.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass SunocoSpider(scrapy.Spider):\n name = \"sunoco\"\n item_attributes = {'brand': \"Sunoco\", 'brand_wikidata': \"Q1423218\"}\n allowed_domains = [\"sunoco.com\"]\n\n start_urls = ['https://www.sunoco.com/js/locations.json']\n\n def parse(self, response):\n for location in json.loads(response.body_as_unicode()):\n opening_hours = OpeningHours()\n\n for key, val in location.items():\n if not key.endswith('_Hours'):\n continue\n day = key[:2].capitalize()\n if val == '24 hours':\n open_time = close_time = '12 AM'\n else:\n open_time, close_time = val.split(' to ')\n opening_hours.add_range(day, open_time, close_time, '%I %p')\n\n yield GeojsonPointItem(\n ref=location['Store_ID'],\n lon=location['Longitude'],\n lat=location['Latitude'],\n # name as shown on the Sunoco site\n name=f\"Sunoco #{location['Store_ID']}\",\n addr_full=location['Street_Address'],\n city=location['City'],\n state=location['State'],\n postcode=location['Postalcode'],\n country='US',\n phone=location['Phone'],\n opening_hours=opening_hours.as_opening_hours(),\n extras={\n 'amenity:fuel': True,\n 'atm': location['ATM'] == 'Y',\n 'car_wash': location['CarWash'],\n 'fuel:diesel': location['HasDiesel'] == 'Y',\n 'fuel:kerosene': location['HasKero'] == 'Y'\n }\n )\n", "path": "locations/spiders/sunoco.py"}]}
1,094
707
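Illustrative aside on the record above, not part of the dataset row: the golden diff replaces the numeric `Hrs of Operation ...` fields with per-day `<Day>_Hours` strings parsed via `'%I %p'`. A small self-contained sketch of that parsing, using a made-up sample row:

```python
from datetime import datetime

# Hypothetical sample row shaped like the new feed fields.
location = {'Store_ID': '42', 'Mo_Hours': '6 AM to 11 PM', 'Su_Hours': '24 hours'}

for key, val in location.items():
    if not key.endswith('_Hours'):
        continue
    day = key[:2].capitalize()
    if val == '24 hours':
        open_time = close_time = '12 AM'
    else:
        open_time, close_time = val.split(' to ')
    # The spider hands these to OpeningHours.add_range(..., '%I %p');
    # parsing here just demonstrates the format is well-formed.
    print(day, datetime.strptime(open_time, '%I %p').time())
```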
gh_patches_debug_12828
rasdani/github-patches
git_diff
ethereum__web3.py-2320
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- dependency conflict with py-evm 0.5* series * Version: 0.5.26 * Python: 3.10 * OS: linux ### What was wrong? Current web3.py not compatible with py-evm `0.5*`. Below are the relevant lines from `pip install` output: ``` The conflict is caused by: py-evm 0.5.0a2 depends on eth-utils<3.0.0 and >=2.0.0 web3 5.26.0 depends on eth-utils<2.0.0 and >=1.9.5 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 from setuptools import ( 4 find_packages, 5 setup, 6 ) 7 8 extras_require = { 9 'tester': [ 10 "eth-tester[py-evm]==v0.6.0-beta.4", 11 "py-geth>=3.7.0,<4", 12 ], 13 'linter': [ 14 "flake8==3.8.3", 15 "isort>=4.2.15,<4.3.5", 16 "mypy==0.910", 17 "types-setuptools>=57.4.4,<58", 18 "types-requests>=2.26.1,<3", 19 "types-protobuf>=3.18.2,<4", 20 ], 21 'docs': [ 22 "mock", 23 "sphinx-better-theme>=0.1.4", 24 "click>=5.1", 25 "configparser==3.5.0", 26 "contextlib2>=0.5.4", 27 "py-geth>=3.6.0,<4", 28 "py-solc>=0.4.0", 29 "pytest>=4.4.0,<5.0.0", 30 "sphinx>=3.0,<4", 31 "sphinx_rtd_theme>=0.1.9", 32 "toposort>=1.4", 33 "towncrier==18.5.0", 34 "urllib3", 35 "wheel" 36 ], 37 'dev': [ 38 "bumpversion", 39 "flaky>=3.7.0,<4", 40 "hypothesis>=3.31.2,<6", 41 "pytest>=4.4.0,<5.0.0", 42 "pytest-asyncio>=0.10.0,<0.11", 43 "pytest-mock>=1.10,<2", 44 "pytest-pythonpath>=0.3", 45 "pytest-watch>=4.2,<5", 46 "pytest-xdist>=1.29,<2", 47 "setuptools>=38.6.0", 48 "tox>=1.8.0", 49 "tqdm>4.32,<5", 50 "twine>=1.13,<2", 51 "pluggy==0.13.1", 52 "when-changed>=0.3.0,<0.4" 53 ] 54 } 55 56 extras_require['dev'] = ( 57 extras_require['tester'] 58 + extras_require['linter'] 59 + extras_require['docs'] 60 + extras_require['dev'] 61 ) 62 63 with open('./README.md') as readme: 64 long_description = readme.read() 65 66 setup( 67 name='web3', 68 # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility. 
69 version='5.26.0', 70 description="""Web3.py""", 71 long_description_content_type='text/markdown', 72 long_description=long_description, 73 author='Piper Merriam', 74 author_email='pipermerriam@gmail.com', 75 url='https://github.com/ethereum/web3.py', 76 include_package_data=True, 77 install_requires=[ 78 "aiohttp>=3.7.4.post0,<4", 79 "eth-abi>=2.0.0b6,<3.0.0", 80 "eth-account>=0.5.6,<0.6.0", 81 "eth-hash[pycryptodome]>=0.2.0,<1.0.0", 82 "eth-typing>=2.0.0,<3.0.0", 83 "eth-utils>=1.9.5,<2.0.0", 84 "hexbytes>=0.1.0,<1.0.0", 85 "ipfshttpclient==0.8.0a2", 86 "jsonschema>=3.2.0,<4.0.0", 87 "lru-dict>=1.1.6,<2.0.0", 88 "protobuf>=3.10.0,<4", 89 "pywin32>=223;platform_system=='Windows'", 90 "requests>=2.16.0,<3.0.0", 91 # remove typing_extensions after python_requires>=3.8, see web3._utils.compat 92 "typing-extensions>=3.7.4.1,<5;python_version<'3.8'", 93 "websockets>=9.1,<10", 94 ], 95 python_requires='>=3.6,<4', 96 extras_require=extras_require, 97 py_modules=['web3', 'ens', 'ethpm'], 98 entry_points={"pytest11": ["pytest_ethereum = web3.tools.pytest_ethereum.plugins"]}, 99 license="MIT", 100 zip_safe=False, 101 keywords='ethereum', 102 packages=find_packages(exclude=["tests", "tests.*"]), 103 package_data={"web3": ["py.typed"]}, 104 classifiers=[ 105 'Development Status :: 5 - Production/Stable', 106 'Intended Audience :: Developers', 107 'License :: OSI Approved :: MIT License', 108 'Natural Language :: English', 109 'Programming Language :: Python :: 3', 110 'Programming Language :: Python :: 3.6', 111 'Programming Language :: Python :: 3.7', 112 'Programming Language :: Python :: 3.8', 113 'Programming Language :: Python :: 3.9', 114 ], 115 ) 116 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ extras_require = { 'tester': [ - "eth-tester[py-evm]==v0.6.0-beta.4", + "eth-tester[py-evm]==v0.6.0-beta.6", "py-geth>=3.7.0,<4", ], 'linter': [ @@ -77,7 +77,7 @@ install_requires=[ "aiohttp>=3.7.4.post0,<4", "eth-abi>=2.0.0b6,<3.0.0", - "eth-account>=0.5.6,<0.6.0", + "eth-account>=0.5.7,<0.6.0", "eth-hash[pycryptodome]>=0.2.0,<1.0.0", "eth-typing>=2.0.0,<3.0.0", "eth-utils>=1.9.5,<2.0.0",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -7,7 +7,7 @@\n \n extras_require = {\n 'tester': [\n- \"eth-tester[py-evm]==v0.6.0-beta.4\",\n+ \"eth-tester[py-evm]==v0.6.0-beta.6\",\n \"py-geth>=3.7.0,<4\",\n ],\n 'linter': [\n@@ -77,7 +77,7 @@\n install_requires=[\n \"aiohttp>=3.7.4.post0,<4\",\n \"eth-abi>=2.0.0b6,<3.0.0\",\n- \"eth-account>=0.5.6,<0.6.0\",\n+ \"eth-account>=0.5.7,<0.6.0\",\n \"eth-hash[pycryptodome]>=0.2.0,<1.0.0\",\n \"eth-typing>=2.0.0,<3.0.0\",\n \"eth-utils>=1.9.5,<2.0.0\",\n", "issue": "dependency conflict with py-evm 0.5* series\n* Version: 0.5.26\r\n* Python: 3.10\r\n* OS: linux\r\n\r\n### What was wrong?\r\n\r\nCurrent web3.py not compatible with py-evm `0.5*`. Below are the relevant lines from `pip install` output:\r\n\r\n```\r\nThe conflict is caused by:\r\n py-evm 0.5.0a2 depends on eth-utils<3.0.0 and >=2.0.0\r\n web3 5.26.0 depends on eth-utils<2.0.0 and >=1.9.5\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import (\n find_packages,\n setup,\n)\n\nextras_require = {\n 'tester': [\n \"eth-tester[py-evm]==v0.6.0-beta.4\",\n \"py-geth>=3.7.0,<4\",\n ],\n 'linter': [\n \"flake8==3.8.3\",\n \"isort>=4.2.15,<4.3.5\",\n \"mypy==0.910\",\n \"types-setuptools>=57.4.4,<58\",\n \"types-requests>=2.26.1,<3\",\n \"types-protobuf>=3.18.2,<4\",\n ],\n 'docs': [\n \"mock\",\n \"sphinx-better-theme>=0.1.4\",\n \"click>=5.1\",\n \"configparser==3.5.0\",\n \"contextlib2>=0.5.4\",\n \"py-geth>=3.6.0,<4\",\n \"py-solc>=0.4.0\",\n \"pytest>=4.4.0,<5.0.0\",\n \"sphinx>=3.0,<4\",\n \"sphinx_rtd_theme>=0.1.9\",\n \"toposort>=1.4\",\n \"towncrier==18.5.0\",\n \"urllib3\",\n \"wheel\"\n ],\n 'dev': [\n \"bumpversion\",\n \"flaky>=3.7.0,<4\",\n \"hypothesis>=3.31.2,<6\",\n \"pytest>=4.4.0,<5.0.0\",\n \"pytest-asyncio>=0.10.0,<0.11\",\n \"pytest-mock>=1.10,<2\",\n \"pytest-pythonpath>=0.3\",\n \"pytest-watch>=4.2,<5\",\n \"pytest-xdist>=1.29,<2\",\n \"setuptools>=38.6.0\",\n \"tox>=1.8.0\",\n \"tqdm>4.32,<5\",\n \"twine>=1.13,<2\",\n \"pluggy==0.13.1\",\n \"when-changed>=0.3.0,<0.4\"\n ]\n}\n\nextras_require['dev'] = (\n extras_require['tester']\n + extras_require['linter']\n + extras_require['docs']\n + extras_require['dev']\n)\n\nwith open('./README.md') as readme:\n long_description = readme.read()\n\nsetup(\n name='web3',\n # *IMPORTANT*: Don't manually change the version here. 
Use the 'bumpversion' utility.\n version='5.26.0',\n description=\"\"\"Web3.py\"\"\",\n long_description_content_type='text/markdown',\n long_description=long_description,\n author='Piper Merriam',\n author_email='pipermerriam@gmail.com',\n url='https://github.com/ethereum/web3.py',\n include_package_data=True,\n install_requires=[\n \"aiohttp>=3.7.4.post0,<4\",\n \"eth-abi>=2.0.0b6,<3.0.0\",\n \"eth-account>=0.5.6,<0.6.0\",\n \"eth-hash[pycryptodome]>=0.2.0,<1.0.0\",\n \"eth-typing>=2.0.0,<3.0.0\",\n \"eth-utils>=1.9.5,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n \"ipfshttpclient==0.8.0a2\",\n \"jsonschema>=3.2.0,<4.0.0\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"protobuf>=3.10.0,<4\",\n \"pywin32>=223;platform_system=='Windows'\",\n \"requests>=2.16.0,<3.0.0\",\n # remove typing_extensions after python_requires>=3.8, see web3._utils.compat\n \"typing-extensions>=3.7.4.1,<5;python_version<'3.8'\",\n \"websockets>=9.1,<10\",\n ],\n python_requires='>=3.6,<4',\n extras_require=extras_require,\n py_modules=['web3', 'ens', 'ethpm'],\n entry_points={\"pytest11\": [\"pytest_ethereum = web3.tools.pytest_ethereum.plugins\"]},\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"web3\": [\"py.typed\"]},\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import (\n find_packages,\n setup,\n)\n\nextras_require = {\n 'tester': [\n \"eth-tester[py-evm]==v0.6.0-beta.6\",\n \"py-geth>=3.7.0,<4\",\n ],\n 'linter': [\n \"flake8==3.8.3\",\n \"isort>=4.2.15,<4.3.5\",\n \"mypy==0.910\",\n \"types-setuptools>=57.4.4,<58\",\n \"types-requests>=2.26.1,<3\",\n \"types-protobuf>=3.18.2,<4\",\n ],\n 'docs': [\n \"mock\",\n \"sphinx-better-theme>=0.1.4\",\n \"click>=5.1\",\n \"configparser==3.5.0\",\n \"contextlib2>=0.5.4\",\n \"py-geth>=3.6.0,<4\",\n \"py-solc>=0.4.0\",\n \"pytest>=4.4.0,<5.0.0\",\n \"sphinx>=3.0,<4\",\n \"sphinx_rtd_theme>=0.1.9\",\n \"toposort>=1.4\",\n \"towncrier==18.5.0\",\n \"urllib3\",\n \"wheel\"\n ],\n 'dev': [\n \"bumpversion\",\n \"flaky>=3.7.0,<4\",\n \"hypothesis>=3.31.2,<6\",\n \"pytest>=4.4.0,<5.0.0\",\n \"pytest-asyncio>=0.10.0,<0.11\",\n \"pytest-mock>=1.10,<2\",\n \"pytest-pythonpath>=0.3\",\n \"pytest-watch>=4.2,<5\",\n \"pytest-xdist>=1.29,<2\",\n \"setuptools>=38.6.0\",\n \"tox>=1.8.0\",\n \"tqdm>4.32,<5\",\n \"twine>=1.13,<2\",\n \"pluggy==0.13.1\",\n \"when-changed>=0.3.0,<0.4\"\n ]\n}\n\nextras_require['dev'] = (\n extras_require['tester']\n + extras_require['linter']\n + extras_require['docs']\n + extras_require['dev']\n)\n\nwith open('./README.md') as readme:\n long_description = readme.read()\n\nsetup(\n name='web3',\n # *IMPORTANT*: Don't manually change the version here. 
Use the 'bumpversion' utility.\n version='5.26.0',\n description=\"\"\"Web3.py\"\"\",\n long_description_content_type='text/markdown',\n long_description=long_description,\n author='Piper Merriam',\n author_email='pipermerriam@gmail.com',\n url='https://github.com/ethereum/web3.py',\n include_package_data=True,\n install_requires=[\n \"aiohttp>=3.7.4.post0,<4\",\n \"eth-abi>=2.0.0b6,<3.0.0\",\n \"eth-account>=0.5.7,<0.6.0\",\n \"eth-hash[pycryptodome]>=0.2.0,<1.0.0\",\n \"eth-typing>=2.0.0,<3.0.0\",\n \"eth-utils>=1.9.5,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n \"ipfshttpclient==0.8.0a2\",\n \"jsonschema>=3.2.0,<4.0.0\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"protobuf>=3.10.0,<4\",\n \"pywin32>=223;platform_system=='Windows'\",\n \"requests>=2.16.0,<3.0.0\",\n # remove typing_extensions after python_requires>=3.8, see web3._utils.compat\n \"typing-extensions>=3.7.4.1,<5;python_version<'3.8'\",\n \"websockets>=9.1,<10\",\n ],\n python_requires='>=3.6,<4',\n extras_require=extras_require,\n py_modules=['web3', 'ens', 'ethpm'],\n entry_points={\"pytest11\": [\"pytest_ethereum = web3.tools.pytest_ethereum.plugins\"]},\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"web3\": [\"py.typed\"]},\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n)\n", "path": "setup.py"}]}
1,802
250
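Illustrative aside on the record above, not part of the dataset row: the issue is an unsatisfiable `eth-utils` range between the two packages, which the diff resolves by bumping the `eth-tester` and `eth-account` pins. A toy check of why the old ranges could never co-resolve (assumes the third-party `packaging` library is installed; this is not how pip actually resolves):

```python
from packaging.specifiers import SpecifierSet

web3_pin = SpecifierSet('>=1.9.5,<2.0.0')    # eth-utils pin in web3 5.26.0
py_evm_pin = SpecifierSet('>=2.0.0,<3.0.0')  # eth-utils pin in py-evm 0.5.0a2

candidates = ['1.9.5', '1.10.0', '2.0.0', '2.1.0']
both = [v for v in candidates if v in web3_pin and v in py_evm_pin]
print(both)  # [] -- no eth-utils release can satisfy both ranges
```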
gh_patches_debug_4262
rasdani/github-patches
git_diff
certbot__certbot-772
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- rename /etc/letsencrypt/configs to /etc/letsencrypt/renewal Since it doesn't contain anything except renewal configuration files, people will probably find the current name confusing. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `letsencrypt/constants.py` Content: ``` 1 """Let's Encrypt constants.""" 2 import os 3 import logging 4 5 from acme import challenges 6 7 8 SETUPTOOLS_PLUGINS_ENTRY_POINT = "letsencrypt.plugins" 9 """Setuptools entry point group name for plugins.""" 10 11 CLI_DEFAULTS = dict( 12 config_files=[ 13 "/etc/letsencrypt/cli.ini", 14 # http://freedesktop.org/wiki/Software/xdg-user-dirs/ 15 os.path.join(os.environ.get("XDG_CONFIG_HOME", "~/.config"), 16 "letsencrypt", "cli.ini"), 17 ], 18 verbose_count=-(logging.WARNING / 10), 19 server="https://acme-staging.api.letsencrypt.org/directory", 20 rsa_key_size=2048, 21 rollback_checkpoints=1, 22 config_dir="/etc/letsencrypt", 23 work_dir="/var/lib/letsencrypt", 24 logs_dir="/var/log/letsencrypt", 25 no_verify_ssl=False, 26 dvsni_port=challenges.DVSNI.PORT, 27 28 auth_cert_path="./cert.pem", 29 auth_chain_path="./chain.pem", 30 ) 31 """Defaults for CLI flags and `.IConfig` attributes.""" 32 33 34 RENEWER_DEFAULTS = dict( 35 renewer_enabled="yes", 36 renew_before_expiry="30 days", 37 deploy_before_expiry="20 days", 38 ) 39 """Defaults for renewer script.""" 40 41 42 EXCLUSIVE_CHALLENGES = frozenset([frozenset([ 43 challenges.DVSNI, challenges.SimpleHTTP])]) 44 """Mutually exclusive challenges.""" 45 46 47 ENHANCEMENTS = ["redirect", "http-header", "ocsp-stapling", "spdy"] 48 """List of possible :class:`letsencrypt.interfaces.IInstaller` 49 enhancements. 50 51 List of expected options parameters: 52 - redirect: None 53 - http-header: TODO 54 - ocsp-stapling: TODO 55 - spdy: TODO 56 57 """ 58 59 ARCHIVE_DIR = "archive" 60 """Archive directory, relative to `IConfig.config_dir`.""" 61 62 CONFIG_DIRS_MODE = 0o755 63 """Directory mode for ``.IConfig.config_dir`` et al.""" 64 65 ACCOUNTS_DIR = "accounts" 66 """Directory where all accounts are saved.""" 67 68 BACKUP_DIR = "backups" 69 """Directory (relative to `IConfig.work_dir`) where backups are kept.""" 70 71 CERT_DIR = "certs" 72 """See `.IConfig.cert_dir`.""" 73 74 CERT_KEY_BACKUP_DIR = "keys-certs" 75 """Directory where all certificates and keys are stored (relative to 76 `IConfig.work_dir`). Used for easy revocation.""" 77 78 IN_PROGRESS_DIR = "IN_PROGRESS" 79 """Directory used before a permanent checkpoint is finalized (relative to 80 `IConfig.work_dir`).""" 81 82 KEY_DIR = "keys" 83 """Directory (relative to `IConfig.config_dir`) where keys are saved.""" 84 85 LIVE_DIR = "live" 86 """Live directory, relative to `IConfig.config_dir`.""" 87 88 TEMP_CHECKPOINT_DIR = "temp_checkpoint" 89 """Temporary checkpoint directory (relative to `IConfig.work_dir`).""" 90 91 RENEWAL_CONFIGS_DIR = "configs" 92 """Renewal configs directory, relative to `IConfig.config_dir`.""" 93 94 RENEWER_CONFIG_FILENAME = "renewer.conf" 95 """Renewer config file name (relative to `IConfig.config_dir`).""" 96 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/letsencrypt/constants.py b/letsencrypt/constants.py --- a/letsencrypt/constants.py +++ b/letsencrypt/constants.py @@ -88,7 +88,7 @@ TEMP_CHECKPOINT_DIR = "temp_checkpoint" """Temporary checkpoint directory (relative to `IConfig.work_dir`).""" -RENEWAL_CONFIGS_DIR = "configs" +RENEWAL_CONFIGS_DIR = "renewal" """Renewal configs directory, relative to `IConfig.config_dir`.""" RENEWER_CONFIG_FILENAME = "renewer.conf"
{"golden_diff": "diff --git a/letsencrypt/constants.py b/letsencrypt/constants.py\n--- a/letsencrypt/constants.py\n+++ b/letsencrypt/constants.py\n@@ -88,7 +88,7 @@\n TEMP_CHECKPOINT_DIR = \"temp_checkpoint\"\n \"\"\"Temporary checkpoint directory (relative to `IConfig.work_dir`).\"\"\"\n \n-RENEWAL_CONFIGS_DIR = \"configs\"\n+RENEWAL_CONFIGS_DIR = \"renewal\"\n \"\"\"Renewal configs directory, relative to `IConfig.config_dir`.\"\"\"\n \n RENEWER_CONFIG_FILENAME = \"renewer.conf\"\n", "issue": "rename /etc/letsencrypt/configs to /etc/letsencrypt/renewal\nSince it doesn't contain anything except renewal configuration files, people will probably find the current name confusing.\n\n", "before_files": [{"content": "\"\"\"Let's Encrypt constants.\"\"\"\nimport os\nimport logging\n\nfrom acme import challenges\n\n\nSETUPTOOLS_PLUGINS_ENTRY_POINT = \"letsencrypt.plugins\"\n\"\"\"Setuptools entry point group name for plugins.\"\"\"\n\nCLI_DEFAULTS = dict(\n config_files=[\n \"/etc/letsencrypt/cli.ini\",\n # http://freedesktop.org/wiki/Software/xdg-user-dirs/\n os.path.join(os.environ.get(\"XDG_CONFIG_HOME\", \"~/.config\"),\n \"letsencrypt\", \"cli.ini\"),\n ],\n verbose_count=-(logging.WARNING / 10),\n server=\"https://acme-staging.api.letsencrypt.org/directory\",\n rsa_key_size=2048,\n rollback_checkpoints=1,\n config_dir=\"/etc/letsencrypt\",\n work_dir=\"/var/lib/letsencrypt\",\n logs_dir=\"/var/log/letsencrypt\",\n no_verify_ssl=False,\n dvsni_port=challenges.DVSNI.PORT,\n\n auth_cert_path=\"./cert.pem\",\n auth_chain_path=\"./chain.pem\",\n)\n\"\"\"Defaults for CLI flags and `.IConfig` attributes.\"\"\"\n\n\nRENEWER_DEFAULTS = dict(\n renewer_enabled=\"yes\",\n renew_before_expiry=\"30 days\",\n deploy_before_expiry=\"20 days\",\n)\n\"\"\"Defaults for renewer script.\"\"\"\n\n\nEXCLUSIVE_CHALLENGES = frozenset([frozenset([\n challenges.DVSNI, challenges.SimpleHTTP])])\n\"\"\"Mutually exclusive challenges.\"\"\"\n\n\nENHANCEMENTS = [\"redirect\", \"http-header\", \"ocsp-stapling\", \"spdy\"]\n\"\"\"List of possible :class:`letsencrypt.interfaces.IInstaller`\nenhancements.\n\nList of expected options parameters:\n- redirect: None\n- http-header: TODO\n- ocsp-stapling: TODO\n- spdy: TODO\n\n\"\"\"\n\nARCHIVE_DIR = \"archive\"\n\"\"\"Archive directory, relative to `IConfig.config_dir`.\"\"\"\n\nCONFIG_DIRS_MODE = 0o755\n\"\"\"Directory mode for ``.IConfig.config_dir`` et al.\"\"\"\n\nACCOUNTS_DIR = \"accounts\"\n\"\"\"Directory where all accounts are saved.\"\"\"\n\nBACKUP_DIR = \"backups\"\n\"\"\"Directory (relative to `IConfig.work_dir`) where backups are kept.\"\"\"\n\nCERT_DIR = \"certs\"\n\"\"\"See `.IConfig.cert_dir`.\"\"\"\n\nCERT_KEY_BACKUP_DIR = \"keys-certs\"\n\"\"\"Directory where all certificates and keys are stored (relative to\n`IConfig.work_dir`). 
Used for easy revocation.\"\"\"\n\nIN_PROGRESS_DIR = \"IN_PROGRESS\"\n\"\"\"Directory used before a permanent checkpoint is finalized (relative to\n`IConfig.work_dir`).\"\"\"\n\nKEY_DIR = \"keys\"\n\"\"\"Directory (relative to `IConfig.config_dir`) where keys are saved.\"\"\"\n\nLIVE_DIR = \"live\"\n\"\"\"Live directory, relative to `IConfig.config_dir`.\"\"\"\n\nTEMP_CHECKPOINT_DIR = \"temp_checkpoint\"\n\"\"\"Temporary checkpoint directory (relative to `IConfig.work_dir`).\"\"\"\n\nRENEWAL_CONFIGS_DIR = \"configs\"\n\"\"\"Renewal configs directory, relative to `IConfig.config_dir`.\"\"\"\n\nRENEWER_CONFIG_FILENAME = \"renewer.conf\"\n\"\"\"Renewer config file name (relative to `IConfig.config_dir`).\"\"\"\n", "path": "letsencrypt/constants.py"}], "after_files": [{"content": "\"\"\"Let's Encrypt constants.\"\"\"\nimport os\nimport logging\n\nfrom acme import challenges\n\n\nSETUPTOOLS_PLUGINS_ENTRY_POINT = \"letsencrypt.plugins\"\n\"\"\"Setuptools entry point group name for plugins.\"\"\"\n\nCLI_DEFAULTS = dict(\n config_files=[\n \"/etc/letsencrypt/cli.ini\",\n # http://freedesktop.org/wiki/Software/xdg-user-dirs/\n os.path.join(os.environ.get(\"XDG_CONFIG_HOME\", \"~/.config\"),\n \"letsencrypt\", \"cli.ini\"),\n ],\n verbose_count=-(logging.WARNING / 10),\n server=\"https://acme-staging.api.letsencrypt.org/directory\",\n rsa_key_size=2048,\n rollback_checkpoints=1,\n config_dir=\"/etc/letsencrypt\",\n work_dir=\"/var/lib/letsencrypt\",\n logs_dir=\"/var/log/letsencrypt\",\n no_verify_ssl=False,\n dvsni_port=challenges.DVSNI.PORT,\n\n auth_cert_path=\"./cert.pem\",\n auth_chain_path=\"./chain.pem\",\n)\n\"\"\"Defaults for CLI flags and `.IConfig` attributes.\"\"\"\n\n\nRENEWER_DEFAULTS = dict(\n renewer_enabled=\"yes\",\n renew_before_expiry=\"30 days\",\n deploy_before_expiry=\"20 days\",\n)\n\"\"\"Defaults for renewer script.\"\"\"\n\n\nEXCLUSIVE_CHALLENGES = frozenset([frozenset([\n challenges.DVSNI, challenges.SimpleHTTP])])\n\"\"\"Mutually exclusive challenges.\"\"\"\n\n\nENHANCEMENTS = [\"redirect\", \"http-header\", \"ocsp-stapling\", \"spdy\"]\n\"\"\"List of possible :class:`letsencrypt.interfaces.IInstaller`\nenhancements.\n\nList of expected options parameters:\n- redirect: None\n- http-header: TODO\n- ocsp-stapling: TODO\n- spdy: TODO\n\n\"\"\"\n\nARCHIVE_DIR = \"archive\"\n\"\"\"Archive directory, relative to `IConfig.config_dir`.\"\"\"\n\nCONFIG_DIRS_MODE = 0o755\n\"\"\"Directory mode for ``.IConfig.config_dir`` et al.\"\"\"\n\nACCOUNTS_DIR = \"accounts\"\n\"\"\"Directory where all accounts are saved.\"\"\"\n\nBACKUP_DIR = \"backups\"\n\"\"\"Directory (relative to `IConfig.work_dir`) where backups are kept.\"\"\"\n\nCERT_DIR = \"certs\"\n\"\"\"See `.IConfig.cert_dir`.\"\"\"\n\nCERT_KEY_BACKUP_DIR = \"keys-certs\"\n\"\"\"Directory where all certificates and keys are stored (relative to\n`IConfig.work_dir`). 
Used for easy revocation.\"\"\"\n\nIN_PROGRESS_DIR = \"IN_PROGRESS\"\n\"\"\"Directory used before a permanent checkpoint is finalized (relative to\n`IConfig.work_dir`).\"\"\"\n\nKEY_DIR = \"keys\"\n\"\"\"Directory (relative to `IConfig.config_dir`) where keys are saved.\"\"\"\n\nLIVE_DIR = \"live\"\n\"\"\"Live directory, relative to `IConfig.config_dir`.\"\"\"\n\nTEMP_CHECKPOINT_DIR = \"temp_checkpoint\"\n\"\"\"Temporary checkpoint directory (relative to `IConfig.work_dir`).\"\"\"\n\nRENEWAL_CONFIGS_DIR = \"renewal\"\n\"\"\"Renewal configs directory, relative to `IConfig.config_dir`.\"\"\"\n\nRENEWER_CONFIG_FILENAME = \"renewer.conf\"\n\"\"\"Renewer config file name (relative to `IConfig.config_dir`).\"\"\"\n", "path": "letsencrypt/constants.py"}]}
1,163
121
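Illustrative aside on the record above, not part of the dataset row: the one-line rename changes where renewal configuration files land on disk. A trivial sketch of how the constant composes into the path the issue asks for:

```python
import os

RENEWAL_CONFIGS_DIR = 'renewal'   # the diff renames this from 'configs'
config_dir = '/etc/letsencrypt'   # default config_dir from CLI_DEFAULTS

print(os.path.join(config_dir, RENEWAL_CONFIGS_DIR))
# -> /etc/letsencrypt/renewal, the less confusing location the issue requests
```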
gh_patches_debug_34433
rasdani/github-patches
git_diff
pantsbuild__pants-14131
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ResolveError: Directory '{mydir}' does not contain any BUILD files (when Dockerizing packages) **Describe the bug** Created a repo at https://github.com/sureshjoshi/pantsbuild-14031 to help illustrate this problem. Essentially, I use custom output paths for my .pex files, and while testing out the `docker_image` target, I noticed some of my components fail with the error > ResolveError: Directory 'backend' does not contain any BUILD files After a lot of debugging, I only ran into this problem when my output folders were common to multiple `pex_binary` targets. For example, in the repo above, I have 3 identical projects (A, B, C) - where they only differ by the `pex_binary` `output_path` (and this location updated in the associated Dockerfile), and one of the projects refuses to compile. As per the README in the repo: ```bash # Should create a pex at dist/backend/projecta/projecta.pex # Docker image created successfully as projecta-container:latest ./pants package backend/projecta:: # Should create a pex at dist/backend.projectc/projectc.pex # Docker image created successfully as projectc-container:latest ./pants package backend/projectc:: ``` ```bash # Should create a pex at dist/backend/projectb.pex ./pants package backend/projectb:projectb # FAILS: With ResolveError ./pants package backend/projectb:projectb-container ``` So, the difference above is that Project C uses no `output_path` and uses the dot-syntax for the dist folder. ProjectA places the pex file under a `backend/projecta` directory. The failing ProjectB places the pex file directly under `backend`. This isn't a big issue, and easily worked around, and I'm guessing it has to do with namespacing or module/package semantics, but it's just a weird problem that is difficult to debug based on the error message. **Pants version** - 2.8.0 - 2.9.0rc1 **OS** macOS 12.1 Untested on Linux --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/python/pants/backend/docker/util_rules/dependencies.py` Content: ``` 1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 
3 4 from pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest 5 from pants.backend.docker.target_types import DockerDependenciesField 6 from pants.core.goals.package import PackageFieldSet 7 from pants.engine.addresses import Addresses, UnparsedAddressInputs 8 from pants.engine.rules import Get, collect_rules, rule 9 from pants.engine.target import ( 10 FieldSetsPerTarget, 11 FieldSetsPerTargetRequest, 12 InjectDependenciesRequest, 13 InjectedDependencies, 14 Targets, 15 ) 16 from pants.engine.unions import UnionRule 17 18 19 class InjectDockerDependencies(InjectDependenciesRequest): 20 inject_for = DockerDependenciesField 21 22 23 @rule 24 async def inject_docker_dependencies(request: InjectDockerDependencies) -> InjectedDependencies: 25 """Inspects COPY instructions in the Dockerfile for references to known targets.""" 26 dockerfile_info = await Get( 27 DockerfileInfo, DockerfileInfoRequest(request.dependencies_field.address) 28 ) 29 targets = await Get( 30 Targets, 31 UnparsedAddressInputs( 32 dockerfile_info.putative_target_addresses, 33 owning_address=None, 34 ), 35 ) 36 package = await Get(FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, targets)) 37 referenced_targets = ( 38 field_sets[0].address for field_sets in package.collection if len(field_sets) > 0 39 ) 40 return InjectedDependencies(Addresses(referenced_targets)) 41 42 43 def rules(): 44 return [ 45 *collect_rules(), 46 UnionRule(InjectDependenciesRequest, InjectDockerDependencies), 47 ] 48 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/python/pants/backend/docker/util_rules/dependencies.py b/src/python/pants/backend/docker/util_rules/dependencies.py --- a/src/python/pants/backend/docker/util_rules/dependencies.py +++ b/src/python/pants/backend/docker/util_rules/dependencies.py @@ -3,6 +3,7 @@ from pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest from pants.backend.docker.target_types import DockerDependenciesField +from pants.base.specs import AddressSpecs, MaybeEmptySiblingAddresses from pants.core.goals.package import PackageFieldSet from pants.engine.addresses import Addresses, UnparsedAddressInputs from pants.engine.rules import Get, collect_rules, rule @@ -22,17 +23,28 @@ @rule async def inject_docker_dependencies(request: InjectDockerDependencies) -> InjectedDependencies: - """Inspects COPY instructions in the Dockerfile for references to known targets.""" + """Inspects COPY instructions in the Dockerfile for references to known packagable targets.""" dockerfile_info = await Get( DockerfileInfo, DockerfileInfoRequest(request.dependencies_field.address) ) - targets = await Get( - Targets, + + # Parse all putative target addresses. + putative_addresses = await Get( + Addresses, UnparsedAddressInputs( dockerfile_info.putative_target_addresses, owning_address=None, ), ) + + # Get the target for those addresses that are known. + directories = {address.spec_path for address in putative_addresses} + all_addresses = await Get(Addresses, AddressSpecs(map(MaybeEmptySiblingAddresses, directories))) + targets = await Get( + Targets, Addresses((address for address in putative_addresses if address in all_addresses)) + ) + + # Only keep those targets that we can "package". package = await Get(FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, targets)) referenced_targets = ( field_sets[0].address for field_sets in package.collection if len(field_sets) > 0
{"golden_diff": "diff --git a/src/python/pants/backend/docker/util_rules/dependencies.py b/src/python/pants/backend/docker/util_rules/dependencies.py\n--- a/src/python/pants/backend/docker/util_rules/dependencies.py\n+++ b/src/python/pants/backend/docker/util_rules/dependencies.py\n@@ -3,6 +3,7 @@\n \n from pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest\n from pants.backend.docker.target_types import DockerDependenciesField\n+from pants.base.specs import AddressSpecs, MaybeEmptySiblingAddresses\n from pants.core.goals.package import PackageFieldSet\n from pants.engine.addresses import Addresses, UnparsedAddressInputs\n from pants.engine.rules import Get, collect_rules, rule\n@@ -22,17 +23,28 @@\n \n @rule\n async def inject_docker_dependencies(request: InjectDockerDependencies) -> InjectedDependencies:\n- \"\"\"Inspects COPY instructions in the Dockerfile for references to known targets.\"\"\"\n+ \"\"\"Inspects COPY instructions in the Dockerfile for references to known packagable targets.\"\"\"\n dockerfile_info = await Get(\n DockerfileInfo, DockerfileInfoRequest(request.dependencies_field.address)\n )\n- targets = await Get(\n- Targets,\n+\n+ # Parse all putative target addresses.\n+ putative_addresses = await Get(\n+ Addresses,\n UnparsedAddressInputs(\n dockerfile_info.putative_target_addresses,\n owning_address=None,\n ),\n )\n+\n+ # Get the target for those addresses that are known.\n+ directories = {address.spec_path for address in putative_addresses}\n+ all_addresses = await Get(Addresses, AddressSpecs(map(MaybeEmptySiblingAddresses, directories)))\n+ targets = await Get(\n+ Targets, Addresses((address for address in putative_addresses if address in all_addresses))\n+ )\n+\n+ # Only keep those targets that we can \"package\".\n package = await Get(FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, targets))\n referenced_targets = (\n field_sets[0].address for field_sets in package.collection if len(field_sets) > 0\n", "issue": "ResolveError: Directory '{mydir}' does not contain any BUILD files (when Dockerizing packages)\n**Describe the bug**\r\n\r\nCreated a repo at https://github.com/sureshjoshi/pantsbuild-14031 to help illustrate this problem. \r\n\r\nEssentially, I use custom output paths for my .pex files, and while testing out the `docker_image` target, I noticed some of my components fail with the error \r\n\r\n> ResolveError: Directory 'backend' does not contain any BUILD files\r\n\r\nAfter a lot of debugging, I only ran into this problem when my output folders were common to multiple `pex_binary` targets. 
\r\n\r\nFor example, in the repo above, I have 3 identical projects (A, B, C) - where they only differ by the `pex_binary` `output_path` (and this location updated in the associated Dockerfile), and one of the projects refuses to compile.\r\n\r\nAs per the README in the repo:\r\n\r\n```bash\r\n# Should create a pex at dist/backend/projecta/projecta.pex\r\n# Docker image created successfully as projecta-container:latest\r\n./pants package backend/projecta::\r\n\r\n# Should create a pex at dist/backend.projectc/projectc.pex\r\n# Docker image created successfully as projectc-container:latest\r\n./pants package backend/projectc::\r\n```\r\n\r\n```bash\r\n# Should create a pex at dist/backend/projectb.pex\r\n./pants package backend/projectb:projectb\r\n\r\n# FAILS: With ResolveError\r\n./pants package backend/projectb:projectb-container \r\n```\r\n\r\nSo, the difference above is that Project C uses no `output_path` and uses the dot-syntax for the dist folder. ProjectA places the pex file under a `backend/projecta` directory. The failing ProjectB places the pex file directly under `backend`.\r\n\r\nThis isn't a big issue, and easily worked around, and I'm guessing it has to do with namespacing or module/package semantics, but it's just a weird problem that is difficult to debug based on the error message.\r\n\r\n**Pants version**\r\n\r\n- 2.8.0\r\n- 2.9.0rc1\r\n\r\n**OS**\r\n\r\nmacOS 12.1\r\nUntested on Linux\r\n\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest\nfrom pants.backend.docker.target_types import DockerDependenciesField\nfrom pants.core.goals.package import PackageFieldSet\nfrom pants.engine.addresses import Addresses, UnparsedAddressInputs\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.engine.target import (\n FieldSetsPerTarget,\n FieldSetsPerTargetRequest,\n InjectDependenciesRequest,\n InjectedDependencies,\n Targets,\n)\nfrom pants.engine.unions import UnionRule\n\n\nclass InjectDockerDependencies(InjectDependenciesRequest):\n inject_for = DockerDependenciesField\n\n\n@rule\nasync def inject_docker_dependencies(request: InjectDockerDependencies) -> InjectedDependencies:\n \"\"\"Inspects COPY instructions in the Dockerfile for references to known targets.\"\"\"\n dockerfile_info = await Get(\n DockerfileInfo, DockerfileInfoRequest(request.dependencies_field.address)\n )\n targets = await Get(\n Targets,\n UnparsedAddressInputs(\n dockerfile_info.putative_target_addresses,\n owning_address=None,\n ),\n )\n package = await Get(FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, targets))\n referenced_targets = (\n field_sets[0].address for field_sets in package.collection if len(field_sets) > 0\n )\n return InjectedDependencies(Addresses(referenced_targets))\n\n\ndef rules():\n return [\n *collect_rules(),\n UnionRule(InjectDependenciesRequest, InjectDockerDependencies),\n ]\n", "path": "src/python/pants/backend/docker/util_rules/dependencies.py"}], "after_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest\nfrom pants.backend.docker.target_types import DockerDependenciesField\nfrom pants.base.specs import AddressSpecs, 
MaybeEmptySiblingAddresses\nfrom pants.core.goals.package import PackageFieldSet\nfrom pants.engine.addresses import Addresses, UnparsedAddressInputs\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.engine.target import (\n FieldSetsPerTarget,\n FieldSetsPerTargetRequest,\n InjectDependenciesRequest,\n InjectedDependencies,\n Targets,\n)\nfrom pants.engine.unions import UnionRule\n\n\nclass InjectDockerDependencies(InjectDependenciesRequest):\n inject_for = DockerDependenciesField\n\n\n@rule\nasync def inject_docker_dependencies(request: InjectDockerDependencies) -> InjectedDependencies:\n \"\"\"Inspects COPY instructions in the Dockerfile for references to known packagable targets.\"\"\"\n dockerfile_info = await Get(\n DockerfileInfo, DockerfileInfoRequest(request.dependencies_field.address)\n )\n\n # Parse all putative target addresses.\n putative_addresses = await Get(\n Addresses,\n UnparsedAddressInputs(\n dockerfile_info.putative_target_addresses,\n owning_address=None,\n ),\n )\n\n # Get the target for those addresses that are known.\n directories = {address.spec_path for address in putative_addresses}\n all_addresses = await Get(Addresses, AddressSpecs(map(MaybeEmptySiblingAddresses, directories)))\n targets = await Get(\n Targets, Addresses((address for address in putative_addresses if address in all_addresses))\n )\n\n # Only keep those targets that we can \"package\".\n package = await Get(FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, targets))\n referenced_targets = (\n field_sets[0].address for field_sets in package.collection if len(field_sets) > 0\n )\n return InjectedDependencies(Addresses(referenced_targets))\n\n\ndef rules():\n return [\n *collect_rules(),\n UnionRule(InjectDependenciesRequest, InjectDockerDependencies),\n ]\n", "path": "src/python/pants/backend/docker/util_rules/dependencies.py"}]}
1,169
448
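The Pants fix above works by resolving only those putative COPY-instruction addresses that actually exist in the build graph, instead of letting one unknown address raise `ResolveError`. A minimal pure-Python sketch of that filtering idea (the names here are hypothetical stand-ins, not the Pants engine API):

```python
# Sketch only: plain-Python analogue of the patch's filtering step.
# `candidates` stand in for putative addresses parsed from a Dockerfile;
# `known_addresses` stands in for addresses the build graph can resolve.

def keep_resolvable(candidates, known_addresses):
    """Drop candidate addresses that do not resolve instead of raising."""
    known = set(known_addresses)
    return [addr for addr in candidates if addr in known]

candidates = ["backend/projectb.pex", "backend/projectb:projectb"]
known_addresses = ["backend/projectb:projectb",
                   "backend/projectb:projectb-container"]
print(keep_resolvable(candidates, known_addresses))
# ['backend/projectb:projectb'] -- the non-target path is silently ignored
```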
gh_patches_debug_22472
rasdani/github-patches
git_diff
mozilla__kitsune-3198
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove all references to ethn.io and Treejack Confirm is ethn.io is something we still use, remove otherwise https://github.com/mozilla/kitsune/blob/master/kitsune/products/jinja2/products/product.html#L92 - [ ] remove treejack waffle.switch - [ ] remove waffle.flag('ethnio-all') Remove all references to ethn.io and Treejack Confirm is ethn.io is something we still use, remove otherwise https://github.com/mozilla/kitsune/blob/master/kitsune/products/jinja2/products/product.html#L92 - [ ] remove treejack waffle.switch - [ ] remove waffle.flag('ethnio-all') --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kitsune/sumo/migrations/0002_initial_data.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 from __future__ import unicode_literals 3 4 from django.db import models, migrations 5 6 7 def create_ratelimit_bypass_perm(apps, schema_editor): 8 # First we get or create the content type. 9 ContentType = apps.get_model('contenttypes', 'ContentType') 10 global_permission_ct, created = ContentType.objects.get_or_create( 11 model='global_permission', app_label='sumo') 12 13 # Then we create a permission attached to that content type. 14 Permission = apps.get_model('auth', 'Permission') 15 perm = Permission.objects.create( 16 name='Bypass Ratelimits', 17 content_type=global_permission_ct, 18 codename='bypass_ratelimit') 19 20 21 def remove_ratelimit_bypass_perm(apps, schema_editor): 22 Permission = apps.get_model('auth', 'Permission') 23 perm = Permission.objects.filter(codename='bypass_ratelimit').delete() 24 25 26 def create_treejack_switch(apps, schema_editor): 27 Switch = apps.get_model('waffle', 'Switch') 28 Switch.objects.create( 29 name='treejack', 30 note='Enables/disables the Treejack snippet.', 31 active=False) 32 33 34 def remove_treejack_switch(apps, schema_editor): 35 Switch = apps.get_model('waffle', 'Switch') 36 Switch.objects.filter(name='treejack').delete() 37 38 39 def create_refresh_survey_flag(apps, schema_editor): 40 Sample = apps.get_model('waffle', 'Sample') 41 Sample.objects.get_or_create( 42 name='refresh-survey', 43 note='Samples users that refresh Firefox to give them a survey.', 44 percent=50.0) 45 46 47 def remove_refresh_survey_flag(apps, schema_editor): 48 Sample = apps.get_model('waffle', 'Sample') 49 Sample.objects.filter(name='refresh-survey').delete() 50 51 52 class Migration(migrations.Migration): 53 54 dependencies = [ 55 ('sumo', '0001_initial'), 56 ('auth', '0001_initial'), 57 ('contenttypes', '0001_initial'), 58 ('waffle', '0001_initial'), 59 ] 60 61 operations = [ 62 migrations.RunPython(create_ratelimit_bypass_perm, remove_ratelimit_bypass_perm), 63 migrations.RunPython(create_treejack_switch, remove_treejack_switch), 64 migrations.RunPython(create_refresh_survey_flag, remove_refresh_survey_flag), 65 ] 66 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kitsune/sumo/migrations/0002_initial_data.py b/kitsune/sumo/migrations/0002_initial_data.py --- a/kitsune/sumo/migrations/0002_initial_data.py +++ b/kitsune/sumo/migrations/0002_initial_data.py @@ -23,19 +23,6 @@ perm = Permission.objects.filter(codename='bypass_ratelimit').delete() -def create_treejack_switch(apps, schema_editor): - Switch = apps.get_model('waffle', 'Switch') - Switch.objects.create( - name='treejack', - note='Enables/disables the Treejack snippet.', - active=False) - - -def remove_treejack_switch(apps, schema_editor): - Switch = apps.get_model('waffle', 'Switch') - Switch.objects.filter(name='treejack').delete() - - def create_refresh_survey_flag(apps, schema_editor): Sample = apps.get_model('waffle', 'Sample') Sample.objects.get_or_create( @@ -60,6 +47,5 @@ operations = [ migrations.RunPython(create_ratelimit_bypass_perm, remove_ratelimit_bypass_perm), - migrations.RunPython(create_treejack_switch, remove_treejack_switch), migrations.RunPython(create_refresh_survey_flag, remove_refresh_survey_flag), ]
{"golden_diff": "diff --git a/kitsune/sumo/migrations/0002_initial_data.py b/kitsune/sumo/migrations/0002_initial_data.py\n--- a/kitsune/sumo/migrations/0002_initial_data.py\n+++ b/kitsune/sumo/migrations/0002_initial_data.py\n@@ -23,19 +23,6 @@\n perm = Permission.objects.filter(codename='bypass_ratelimit').delete()\n \n \n-def create_treejack_switch(apps, schema_editor):\n- Switch = apps.get_model('waffle', 'Switch')\n- Switch.objects.create(\n- name='treejack',\n- note='Enables/disables the Treejack snippet.',\n- active=False)\n-\n-\n-def remove_treejack_switch(apps, schema_editor):\n- Switch = apps.get_model('waffle', 'Switch')\n- Switch.objects.filter(name='treejack').delete()\n-\n-\n def create_refresh_survey_flag(apps, schema_editor):\n Sample = apps.get_model('waffle', 'Sample')\n Sample.objects.get_or_create(\n@@ -60,6 +47,5 @@\n \n operations = [\n migrations.RunPython(create_ratelimit_bypass_perm, remove_ratelimit_bypass_perm),\n- migrations.RunPython(create_treejack_switch, remove_treejack_switch),\n migrations.RunPython(create_refresh_survey_flag, remove_refresh_survey_flag),\n ]\n", "issue": "Remove all references to ethn.io and Treejack\nConfirm is ethn.io is something we still use, remove otherwise\r\n\r\nhttps://github.com/mozilla/kitsune/blob/master/kitsune/products/jinja2/products/product.html#L92\r\n\r\n- [ ] remove treejack waffle.switch\r\n- [ ] remove waffle.flag('ethnio-all')\nRemove all references to ethn.io and Treejack\nConfirm is ethn.io is something we still use, remove otherwise\r\n\r\nhttps://github.com/mozilla/kitsune/blob/master/kitsune/products/jinja2/products/product.html#L92\r\n\r\n- [ ] remove treejack waffle.switch\r\n- [ ] remove waffle.flag('ethnio-all')\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\ndef create_ratelimit_bypass_perm(apps, schema_editor):\n # First we get or create the content type.\n ContentType = apps.get_model('contenttypes', 'ContentType')\n global_permission_ct, created = ContentType.objects.get_or_create(\n model='global_permission', app_label='sumo')\n\n # Then we create a permission attached to that content type.\n Permission = apps.get_model('auth', 'Permission')\n perm = Permission.objects.create(\n name='Bypass Ratelimits',\n content_type=global_permission_ct,\n codename='bypass_ratelimit')\n\n\ndef remove_ratelimit_bypass_perm(apps, schema_editor):\n Permission = apps.get_model('auth', 'Permission')\n perm = Permission.objects.filter(codename='bypass_ratelimit').delete()\n\n\ndef create_treejack_switch(apps, schema_editor):\n Switch = apps.get_model('waffle', 'Switch')\n Switch.objects.create(\n name='treejack',\n note='Enables/disables the Treejack snippet.',\n active=False)\n\n\ndef remove_treejack_switch(apps, schema_editor):\n Switch = apps.get_model('waffle', 'Switch')\n Switch.objects.filter(name='treejack').delete()\n\n\ndef create_refresh_survey_flag(apps, schema_editor):\n Sample = apps.get_model('waffle', 'Sample')\n Sample.objects.get_or_create(\n name='refresh-survey',\n note='Samples users that refresh Firefox to give them a survey.',\n percent=50.0)\n\n\ndef remove_refresh_survey_flag(apps, schema_editor):\n Sample = apps.get_model('waffle', 'Sample')\n Sample.objects.filter(name='refresh-survey').delete()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('sumo', '0001_initial'),\n ('auth', '0001_initial'),\n ('contenttypes', '0001_initial'),\n ('waffle', '0001_initial'),\n ]\n\n operations = 
[\n migrations.RunPython(create_ratelimit_bypass_perm, remove_ratelimit_bypass_perm),\n migrations.RunPython(create_treejack_switch, remove_treejack_switch),\n migrations.RunPython(create_refresh_survey_flag, remove_refresh_survey_flag),\n ]\n", "path": "kitsune/sumo/migrations/0002_initial_data.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\ndef create_ratelimit_bypass_perm(apps, schema_editor):\n # First we get or create the content type.\n ContentType = apps.get_model('contenttypes', 'ContentType')\n global_permission_ct, created = ContentType.objects.get_or_create(\n model='global_permission', app_label='sumo')\n\n # Then we create a permission attached to that content type.\n Permission = apps.get_model('auth', 'Permission')\n perm = Permission.objects.create(\n name='Bypass Ratelimits',\n content_type=global_permission_ct,\n codename='bypass_ratelimit')\n\n\ndef remove_ratelimit_bypass_perm(apps, schema_editor):\n Permission = apps.get_model('auth', 'Permission')\n perm = Permission.objects.filter(codename='bypass_ratelimit').delete()\n\n\ndef create_refresh_survey_flag(apps, schema_editor):\n Sample = apps.get_model('waffle', 'Sample')\n Sample.objects.get_or_create(\n name='refresh-survey',\n note='Samples users that refresh Firefox to give them a survey.',\n percent=50.0)\n\n\ndef remove_refresh_survey_flag(apps, schema_editor):\n Sample = apps.get_model('waffle', 'Sample')\n Sample.objects.filter(name='refresh-survey').delete()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('sumo', '0001_initial'),\n ('auth', '0001_initial'),\n ('contenttypes', '0001_initial'),\n ('waffle', '0001_initial'),\n ]\n\n operations = [\n migrations.RunPython(create_ratelimit_bypass_perm, remove_ratelimit_bypass_perm),\n migrations.RunPython(create_refresh_survey_flag, remove_refresh_survey_flag),\n ]\n", "path": "kitsune/sumo/migrations/0002_initial_data.py"}]}
1,040
304
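The kitsune patch edits the historical migration in place, which works for this benchmark's verification; in a live Django project the same cleanup would more commonly ship as a new, reversible migration. A hedged sketch of that alternative (assuming Django with django-waffle; the filename and dependency list are illustrative):

```python
# Hypothetical follow-up migration, e.g. sumo/migrations/0003_remove_treejack.py
from django.db import migrations


def remove_treejack_switch(apps, schema_editor):
    # Forward: delete the now-unused waffle switch.
    Switch = apps.get_model('waffle', 'Switch')
    Switch.objects.filter(name='treejack').delete()


def create_treejack_switch(apps, schema_editor):
    # Reverse: recreate the switch so the migration stays reversible.
    Switch = apps.get_model('waffle', 'Switch')
    Switch.objects.get_or_create(
        name='treejack',
        defaults={'note': 'Enables/disables the Treejack snippet.',
                  'active': False},
    )


class Migration(migrations.Migration):
    dependencies = [
        ('sumo', '0002_initial_data'),
        ('waffle', '0001_initial'),
    ]
    operations = [
        migrations.RunPython(remove_treejack_switch, create_treejack_switch),
    ]
```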
gh_patches_debug_25252
rasdani/github-patches
git_diff
networkx__networkx-3016
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `OrderedGraph.subgraph` does not maintain the order of the nodes A subgraph built from a `OrderedGraph` should keep the order of the nodes, yet nodes in the subgraph are neither in the order of the initial graph, nor in the order of the selection. The issue can be seen from the following snippet: ```python graph = nx.OrderedGraph() nodes = list(range(10)) random.shuffle(nodes) graph.add_nodes_from(nodes) # key order is (7, 2, 1, 9, 0, 8, 6, 4, 3, 5) # We create a selection in the same order as the initial graph keys to_keep = [key for key in graph if key % 2 == 0] # [2, 0, 8, 6, 4] subgraph = graph.subgraph(to_keep) # (0, 2, 4, 6, 8) # We create a selection in a different order subgraph = graph.subgraph([5, 3, 1]) # (1, 3, 5) ``` From what I see, the issue is due to `Graph.subgraph` passing the selection to `nx.filters.show_nodes` that transforms it to a set. The nodes in the subgraph are then in the order of the set; as sets do not preserve the order, the order of the initial graph if not preserved. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `networkx/classes/ordered.py` Content: ``` 1 """ 2 Consistently ordered variants of the default base classes. 3 4 The Ordered (Di/Multi/MultiDi) Graphs give a consistent order for reporting of 5 nodes and edges. The order of node reporting agrees with node adding, but for 6 edges, the order is not necessarily the order that the edges were added. 7 8 In general, you should use the default (i.e., unordered) graph classes. 9 However, there are times (e.g., when testing) when you may need the 10 order preserved. 11 """ 12 from collections import OrderedDict 13 14 from .graph import Graph 15 from .multigraph import MultiGraph 16 from .digraph import DiGraph 17 from .multidigraph import MultiDiGraph 18 19 __all__ = [] 20 21 __all__.extend([ 22 'OrderedGraph', 23 'OrderedDiGraph', 24 'OrderedMultiGraph', 25 'OrderedMultiDiGraph', 26 ]) 27 28 29 class OrderedGraph(Graph): 30 """Consistently ordered variant of :class:`~networkx.Graph`.""" 31 node_dict_factory = OrderedDict 32 adjlist_outer_dict_factory = OrderedDict 33 adjlist_inner_dict_factory = OrderedDict 34 edge_attr_dict_factory = OrderedDict 35 36 def fresh_copy(self): 37 """Return a fresh copy graph with the same data structure. 38 39 A fresh copy has no nodes, edges or graph attributes. It is 40 the same data structure as the current graph. This method is 41 typically used to create an empty version of the graph. 42 """ 43 return OrderedGraph() 44 45 46 class OrderedDiGraph(DiGraph): 47 """Consistently ordered variant of :class:`~networkx.DiGraph`.""" 48 node_dict_factory = OrderedDict 49 adjlist_outer_dict_factory = OrderedDict 50 adjlist_inner_dict_factory = OrderedDict 51 edge_attr_dict_factory = OrderedDict 52 53 def fresh_copy(self): 54 """Return a fresh copy graph with the same data structure. 55 56 A fresh copy has no nodes, edges or graph attributes. It is 57 the same data structure as the current graph. This method is 58 typically used to create an empty version of the graph. 
59 """ 60 return OrderedDiGraph() 61 62 63 class OrderedMultiGraph(MultiGraph): 64 """Consistently ordered variant of :class:`~networkx.MultiGraph`.""" 65 node_dict_factory = OrderedDict 66 adjlist_outer_dict_factory = OrderedDict 67 adjlist_inner_dict_factory = OrderedDict 68 edge_key_dict_factory = OrderedDict 69 edge_attr_dict_factory = OrderedDict 70 71 def fresh_copy(self): 72 """Return a fresh copy graph with the same data structure. 73 74 A fresh copy has no nodes, edges or graph attributes. It is 75 the same data structure as the current graph. This method is 76 typically used to create an empty version of the graph. 77 """ 78 return OrderedMultiGraph() 79 80 81 class OrderedMultiDiGraph(MultiDiGraph): 82 """Consistently ordered variant of :class:`~networkx.MultiDiGraph`.""" 83 node_dict_factory = OrderedDict 84 adjlist_outer_dict_factory = OrderedDict 85 adjlist_inner_dict_factory = OrderedDict 86 edge_key_dict_factory = OrderedDict 87 edge_attr_dict_factory = OrderedDict 88 89 def fresh_copy(self): 90 """Return a fresh copy graph with the same data structure. 91 92 A fresh copy has no nodes, edges or graph attributes. It is 93 the same data structure as the current graph. This method is 94 typically used to create an empty version of the graph. 95 """ 96 return OrderedMultiDiGraph() 97 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/networkx/classes/ordered.py b/networkx/classes/ordered.py --- a/networkx/classes/ordered.py +++ b/networkx/classes/ordered.py @@ -1,5 +1,10 @@ """ Consistently ordered variants of the default base classes. +Note that if you are using Python 3.6, you shouldn't need these classes +because the dicts in Python 3.6 are ordered. +Note also that there are many differing expectations for the word "ordered" +and that these classes may not provide the order you expect. +The intent here is to give a consistent order not a particular order. The Ordered (Di/Multi/MultiDi) Graphs give a consistent order for reporting of nodes and edges. The order of node reporting agrees with node adding, but for @@ -8,6 +13,17 @@ In general, you should use the default (i.e., unordered) graph classes. However, there are times (e.g., when testing) when you may need the order preserved. + +Special care is required when using subgraphs of the Ordered classes. +The order of nodes in the subclass is not necessarily the same order +as the original class. In general it is probably better to avoid using +subgraphs and replace with code similar to: + + # instead of SG = G.subgraph(ordered_nodes) + SG=nx.OrderedGraph() + SG.add_nodes_from(ordered_nodes) + SG.add_edges_from((u, v) for (u, v) in G.edges() if u in SG if v in SG) + """ from collections import OrderedDict
{"golden_diff": "diff --git a/networkx/classes/ordered.py b/networkx/classes/ordered.py\n--- a/networkx/classes/ordered.py\n+++ b/networkx/classes/ordered.py\n@@ -1,5 +1,10 @@\n \"\"\"\n Consistently ordered variants of the default base classes.\n+Note that if you are using Python 3.6, you shouldn't need these classes\n+because the dicts in Python 3.6 are ordered.\n+Note also that there are many differing expectations for the word \"ordered\"\n+and that these classes may not provide the order you expect.\n+The intent here is to give a consistent order not a particular order.\n \n The Ordered (Di/Multi/MultiDi) Graphs give a consistent order for reporting of\n nodes and edges. The order of node reporting agrees with node adding, but for\n@@ -8,6 +13,17 @@\n In general, you should use the default (i.e., unordered) graph classes.\n However, there are times (e.g., when testing) when you may need the\n order preserved.\n+\n+Special care is required when using subgraphs of the Ordered classes.\n+The order of nodes in the subclass is not necessarily the same order\n+as the original class. In general it is probably better to avoid using\n+subgraphs and replace with code similar to:\n+\n+ # instead of SG = G.subgraph(ordered_nodes)\n+ SG=nx.OrderedGraph()\n+ SG.add_nodes_from(ordered_nodes)\n+ SG.add_edges_from((u, v) for (u, v) in G.edges() if u in SG if v in SG)\n+\n \"\"\"\n from collections import OrderedDict\n", "issue": "`OrderedGraph.subgraph` does not maintain the order of the nodes\nA subgraph built from a `OrderedGraph` should keep the order of the nodes, yet nodes in the subgraph are neither in the order of the initial graph, nor in the order of the selection. The issue can be seen from the following snippet:\r\n\r\n```python\r\ngraph = nx.OrderedGraph()\r\nnodes = list(range(10))\r\nrandom.shuffle(nodes)\r\ngraph.add_nodes_from(nodes) # key order is (7, 2, 1, 9, 0, 8, 6, 4, 3, 5)\r\n\r\n# We create a selection in the same order as the initial graph keys\r\nto_keep = [key for key in graph if key % 2 == 0] # [2, 0, 8, 6, 4]\r\nsubgraph = graph.subgraph(to_keep) # (0, 2, 4, 6, 8)\r\n\r\n# We create a selection in a different order\r\nsubgraph = graph.subgraph([5, 3, 1]) # (1, 3, 5)\r\n```\r\n\r\nFrom what I see, the issue is due to `Graph.subgraph` passing the selection to `nx.filters.show_nodes` that transforms it to a set. The nodes in the subgraph are then in the order of the set; as sets do not preserve the order, the order of the initial graph if not preserved.\n", "before_files": [{"content": "\"\"\"\nConsistently ordered variants of the default base classes.\n\nThe Ordered (Di/Multi/MultiDi) Graphs give a consistent order for reporting of\nnodes and edges. 
The order of node reporting agrees with node adding, but for\nedges, the order is not necessarily the order that the edges were added.\n\nIn general, you should use the default (i.e., unordered) graph classes.\nHowever, there are times (e.g., when testing) when you may need the\norder preserved.\n\"\"\"\nfrom collections import OrderedDict\n\nfrom .graph import Graph\nfrom .multigraph import MultiGraph\nfrom .digraph import DiGraph\nfrom .multidigraph import MultiDiGraph\n\n__all__ = []\n\n__all__.extend([\n 'OrderedGraph',\n 'OrderedDiGraph',\n 'OrderedMultiGraph',\n 'OrderedMultiDiGraph',\n])\n\n\nclass OrderedGraph(Graph):\n \"\"\"Consistently ordered variant of :class:`~networkx.Graph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedGraph()\n\n\nclass OrderedDiGraph(DiGraph):\n \"\"\"Consistently ordered variant of :class:`~networkx.DiGraph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedDiGraph()\n\n\nclass OrderedMultiGraph(MultiGraph):\n \"\"\"Consistently ordered variant of :class:`~networkx.MultiGraph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_key_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedMultiGraph()\n\n\nclass OrderedMultiDiGraph(MultiDiGraph):\n \"\"\"Consistently ordered variant of :class:`~networkx.MultiDiGraph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_key_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. 
This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedMultiDiGraph()\n", "path": "networkx/classes/ordered.py"}], "after_files": [{"content": "\"\"\"\nConsistently ordered variants of the default base classes.\nNote that if you are using Python 3.6, you shouldn't need these classes\nbecause the dicts in Python 3.6 are ordered.\nNote also that there are many differing expectations for the word \"ordered\"\nand that these classes may not provide the order you expect.\nThe intent here is to give a consistent order not a particular order.\n\nThe Ordered (Di/Multi/MultiDi) Graphs give a consistent order for reporting of\nnodes and edges. The order of node reporting agrees with node adding, but for\nedges, the order is not necessarily the order that the edges were added.\n\nIn general, you should use the default (i.e., unordered) graph classes.\nHowever, there are times (e.g., when testing) when you may need the\norder preserved.\n\nSpecial care is required when using subgraphs of the Ordered classes.\nThe order of nodes in the subclass is not necessarily the same order\nas the original class. In general it is probably better to avoid using\nsubgraphs and replace with code similar to:\n\n # instead of SG = G.subgraph(ordered_nodes)\n SG=nx.OrderedGraph()\n SG.add_nodes_from(ordered_nodes)\n SG.add_edges_from((u, v) for (u, v) in G.edges() if u in SG if v in SG)\n\n\"\"\"\nfrom collections import OrderedDict\n\nfrom .graph import Graph\nfrom .multigraph import MultiGraph\nfrom .digraph import DiGraph\nfrom .multidigraph import MultiDiGraph\n\n__all__ = []\n\n__all__.extend([\n 'OrderedGraph',\n 'OrderedDiGraph',\n 'OrderedMultiGraph',\n 'OrderedMultiDiGraph',\n])\n\n\nclass OrderedGraph(Graph):\n \"\"\"Consistently ordered variant of :class:`~networkx.Graph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedGraph()\n\n\nclass OrderedDiGraph(DiGraph):\n \"\"\"Consistently ordered variant of :class:`~networkx.DiGraph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedDiGraph()\n\n\nclass OrderedMultiGraph(MultiGraph):\n \"\"\"Consistently ordered variant of :class:`~networkx.MultiGraph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_key_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. 
This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedMultiGraph()\n\n\nclass OrderedMultiDiGraph(MultiDiGraph):\n \"\"\"Consistently ordered variant of :class:`~networkx.MultiDiGraph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_key_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedMultiDiGraph()\n", "path": "networkx/classes/ordered.py"}]}
1,474
352
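The networkx patch resolves the issue by documenting a workaround rather than changing `subgraph` itself, since `show_nodes` converts the selection to a set. The pattern recommended in the new module docstring, made runnable (assuming networkx 2.x, where `OrderedGraph` still exists):

```python
# Rebuild the subgraph explicitly so node order follows `ordered_nodes`
# instead of set iteration order.
import networkx as nx

G = nx.OrderedGraph()
G.add_nodes_from([7, 2, 1, 9, 0, 8, 6, 4, 3, 5])
G.add_edges_from([(7, 2), (2, 0), (8, 6)])

ordered_nodes = [n for n in G if n % 2 == 0]  # [2, 0, 8, 6, 4], in G's order
SG = nx.OrderedGraph()
SG.add_nodes_from(ordered_nodes)
SG.add_edges_from((u, v) for u, v in G.edges() if u in SG and v in SG)
print(list(SG))  # [2, 0, 8, 6, 4] -- selection order is preserved
```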
gh_patches_debug_16045
rasdani/github-patches
git_diff
digitalfabrik__integreat-cms-1601
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Separate XLIFF export options ### Motivation <!-- A clear and concise description of what the motivation for the new feature is, and what problem it is solving. --> Apparently, the two options to export XLIFF files (either all pages or only public pages) are too similar and lead to accidental misclicks. ### Proposed Solution <!-- A clear and concise description of the feature you would like to add, and how it solves the motivating problem. --> - Move the public option to the top (since this is the most used) - Reword the "all pages" option to make it more obvious that this includes drafts - Maybe add an exclamation mark or similar to the "all pages" option ### Alternatives <!-- A clear and concise description of any alternative solutions or features you've considered, and why you're proposed solution is better. --> ### Additional Context <!-- Add any other information or screenshots about the feature request here. --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `integreat_cms/cms/views/pages/page_bulk_actions.py` Content: ``` 1 import logging 2 3 from django.contrib import messages 4 from django.shortcuts import get_object_or_404 5 from django.utils.translation import ugettext as _ 6 from django.views.generic.list import MultipleObjectMixin 7 8 from ....xliff.utils import pages_to_xliff_file 9 from ...models import Page 10 from ...utils.pdf_utils import generate_pdf 11 from ...utils.translation_utils import ugettext_many_lazy as __ 12 from ..bulk_action_views import BulkActionView 13 14 logger = logging.getLogger(__name__) 15 16 17 class PageBulkActionMixin(MultipleObjectMixin): 18 """ 19 Mixin for page bulk actions 20 """ 21 22 #: The model of this :class:`~integreat_cms.cms.views.bulk_action_views.BulkActionView` 23 model = Page 24 25 26 # pylint: disable=too-many-ancestors 27 class GeneratePdfView(PageBulkActionMixin, BulkActionView): 28 """ 29 Bulk action for generating a PDF document of the content 30 """ 31 32 #: Whether the view requires change permissions 33 require_change_permission = False 34 #: Whether the public translation objects should be prefetched 35 prefetch_public_translations = True 36 37 def post(self, request, *args, **kwargs): 38 r""" 39 Apply the bulk action on every item in the queryset and redirect 40 41 :param request: The current request 42 :type request: ~django.http.HttpResponse 43 44 :param \*args: The supplied arguments 45 :type \*args: list 46 47 :param \**kwargs: The supplied keyword arguments 48 :type \**kwargs: dict 49 50 :return: The redirect 51 :rtype: ~django.http.HttpResponseRedirect 52 """ 53 # Generate PDF document and redirect to it 54 return generate_pdf( 55 request.region, 56 kwargs.get("language_slug"), 57 self.get_queryset(), 58 ) 59 60 61 # pylint: disable=too-many-ancestors 62 class ExportXliffView(PageBulkActionMixin, BulkActionView): 63 """ 64 Bulk action for generating XLIFF files for translations 65 """ 66 67 #: Whether only public translation should be exported 68 only_public = False 69 #: Whether the view requires change permissions 70 require_change_permission = False 71 72 def post(self, request, *args, **kwargs): 73 r""" 74 Function for handling a pdf export request for pages. 
75 The pages get extracted from request.GET attribute and the request is forwarded to :func:`~integreat_cms.cms.utils.pdf_utils.generate_pdf` 76 77 :param request: The current request 78 :type request: ~django.http.HttpResponse 79 80 :param \*args: The supplied arguments 81 :type \*args: list 82 83 :param \**kwargs: The supplied keyword arguments 84 :type \**kwargs: dict 85 86 :return: The redirect 87 :rtype: ~django.http.HttpResponseRedirect 88 """ 89 target_language = get_object_or_404( 90 self.request.region.language_tree_nodes, 91 language__slug=kwargs.get("language_slug"), 92 parent__isnull=False, 93 ).language 94 95 xliff_file_url = pages_to_xliff_file( 96 request, self.get_queryset(), target_language, only_public=self.only_public 97 ) 98 if xliff_file_url: 99 # Insert link with automatic download into success message 100 messages.success( 101 request, 102 __( 103 _("XLIFF file for translation to {} successfully created.").format( 104 target_language 105 ), 106 _( 107 "If the download does not start automatically, please click {}here{}." 108 ).format( 109 f"<a data-auto-download href='{xliff_file_url}' class='font-bold underline hover:no-underline' download>", 110 "</a>", 111 ), 112 ), 113 ) 114 115 # Let the base view handle the redirect 116 return super().post(request, *args, **kwargs) 117 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/integreat_cms/cms/views/pages/page_bulk_actions.py b/integreat_cms/cms/views/pages/page_bulk_actions.py --- a/integreat_cms/cms/views/pages/page_bulk_actions.py +++ b/integreat_cms/cms/views/pages/page_bulk_actions.py @@ -100,9 +100,13 @@ messages.success( request, __( - _("XLIFF file for translation to {} successfully created.").format( - target_language - ), + _( + "XLIFF file with published pages only for translation to {} successfully created." + ).format(target_language) + if self.only_public + else _( + "XLIFF file with unpublished and published pages for translation to {} successfully created." + ).format(target_language), _( "If the download does not start automatically, please click {}here{}." ).format(
{"golden_diff": "diff --git a/integreat_cms/cms/views/pages/page_bulk_actions.py b/integreat_cms/cms/views/pages/page_bulk_actions.py\n--- a/integreat_cms/cms/views/pages/page_bulk_actions.py\n+++ b/integreat_cms/cms/views/pages/page_bulk_actions.py\n@@ -100,9 +100,13 @@\n messages.success(\n request,\n __(\n- _(\"XLIFF file for translation to {} successfully created.\").format(\n- target_language\n- ),\n+ _(\n+ \"XLIFF file with published pages only for translation to {} successfully created.\"\n+ ).format(target_language)\n+ if self.only_public\n+ else _(\n+ \"XLIFF file with unpublished and published pages for translation to {} successfully created.\"\n+ ).format(target_language),\n _(\n \"If the download does not start automatically, please click {}here{}.\"\n ).format(\n", "issue": "Separate XLIFF export options\n### Motivation\r\n<!-- A clear and concise description of what the motivation for the new feature is, and what problem it is solving. -->\r\nApparently, the two options to export XLIFF files (either all pages or only public pages) are too similar and lead to accidental misclicks.\r\n\r\n### Proposed Solution\r\n<!-- A clear and concise description of the feature you would like to add, and how it solves the motivating problem. -->\r\n- Move the public option to the top (since this is the most used)\r\n- Reword the \"all pages\" option to make it more obvious that this includes drafts\r\n- Maybe add an exclamation mark or similar to the \"all pages\" option\r\n\r\n### Alternatives\r\n<!-- A clear and concise description of any alternative solutions or features you've considered, and why you're proposed solution is better. -->\r\n\r\n\r\n### Additional Context\r\n<!-- Add any other information or screenshots about the feature request here. 
-->\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom django.contrib import messages\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic.list import MultipleObjectMixin\n\nfrom ....xliff.utils import pages_to_xliff_file\nfrom ...models import Page\nfrom ...utils.pdf_utils import generate_pdf\nfrom ...utils.translation_utils import ugettext_many_lazy as __\nfrom ..bulk_action_views import BulkActionView\n\nlogger = logging.getLogger(__name__)\n\n\nclass PageBulkActionMixin(MultipleObjectMixin):\n \"\"\"\n Mixin for page bulk actions\n \"\"\"\n\n #: The model of this :class:`~integreat_cms.cms.views.bulk_action_views.BulkActionView`\n model = Page\n\n\n# pylint: disable=too-many-ancestors\nclass GeneratePdfView(PageBulkActionMixin, BulkActionView):\n \"\"\"\n Bulk action for generating a PDF document of the content\n \"\"\"\n\n #: Whether the view requires change permissions\n require_change_permission = False\n #: Whether the public translation objects should be prefetched\n prefetch_public_translations = True\n\n def post(self, request, *args, **kwargs):\n r\"\"\"\n Apply the bulk action on every item in the queryset and redirect\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The redirect\n :rtype: ~django.http.HttpResponseRedirect\n \"\"\"\n # Generate PDF document and redirect to it\n return generate_pdf(\n request.region,\n kwargs.get(\"language_slug\"),\n self.get_queryset(),\n )\n\n\n# pylint: disable=too-many-ancestors\nclass ExportXliffView(PageBulkActionMixin, BulkActionView):\n \"\"\"\n Bulk action for generating XLIFF files for translations\n \"\"\"\n\n #: Whether only public translation should be exported\n only_public = False\n #: Whether the view requires change permissions\n require_change_permission = False\n\n def post(self, request, *args, **kwargs):\n r\"\"\"\n Function for handling a pdf export request for pages.\n The pages get extracted from request.GET attribute and the request is forwarded to :func:`~integreat_cms.cms.utils.pdf_utils.generate_pdf`\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The redirect\n :rtype: ~django.http.HttpResponseRedirect\n \"\"\"\n target_language = get_object_or_404(\n self.request.region.language_tree_nodes,\n language__slug=kwargs.get(\"language_slug\"),\n parent__isnull=False,\n ).language\n\n xliff_file_url = pages_to_xliff_file(\n request, self.get_queryset(), target_language, only_public=self.only_public\n )\n if xliff_file_url:\n # Insert link with automatic download into success message\n messages.success(\n request,\n __(\n _(\"XLIFF file for translation to {} successfully created.\").format(\n target_language\n ),\n _(\n \"If the download does not start automatically, please click {}here{}.\"\n ).format(\n f\"<a data-auto-download href='{xliff_file_url}' class='font-bold underline hover:no-underline' download>\",\n \"</a>\",\n ),\n ),\n )\n\n # Let the base view handle the redirect\n return super().post(request, *args, **kwargs)\n", "path": "integreat_cms/cms/views/pages/page_bulk_actions.py"}], "after_files": [{"content": "import logging\n\nfrom django.contrib 
import messages\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic.list import MultipleObjectMixin\n\nfrom ....xliff.utils import pages_to_xliff_file\nfrom ...models import Page\nfrom ...utils.pdf_utils import generate_pdf\nfrom ...utils.translation_utils import ugettext_many_lazy as __\nfrom ..bulk_action_views import BulkActionView\n\nlogger = logging.getLogger(__name__)\n\n\nclass PageBulkActionMixin(MultipleObjectMixin):\n \"\"\"\n Mixin for page bulk actions\n \"\"\"\n\n #: The model of this :class:`~integreat_cms.cms.views.bulk_action_views.BulkActionView`\n model = Page\n\n\n# pylint: disable=too-many-ancestors\nclass GeneratePdfView(PageBulkActionMixin, BulkActionView):\n \"\"\"\n Bulk action for generating a PDF document of the content\n \"\"\"\n\n #: Whether the view requires change permissions\n require_change_permission = False\n #: Whether the public translation objects should be prefetched\n prefetch_public_translations = True\n\n def post(self, request, *args, **kwargs):\n r\"\"\"\n Apply the bulk action on every item in the queryset and redirect\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The redirect\n :rtype: ~django.http.HttpResponseRedirect\n \"\"\"\n # Generate PDF document and redirect to it\n return generate_pdf(\n request.region,\n kwargs.get(\"language_slug\"),\n self.get_queryset(),\n )\n\n\n# pylint: disable=too-many-ancestors\nclass ExportXliffView(PageBulkActionMixin, BulkActionView):\n \"\"\"\n Bulk action for generating XLIFF files for translations\n \"\"\"\n\n #: Whether only public translation should be exported\n only_public = False\n #: Whether the view requires change permissions\n require_change_permission = False\n\n def post(self, request, *args, **kwargs):\n r\"\"\"\n Function for handling a pdf export request for pages.\n The pages get extracted from request.GET attribute and the request is forwarded to :func:`~integreat_cms.cms.utils.pdf_utils.generate_pdf`\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The redirect\n :rtype: ~django.http.HttpResponseRedirect\n \"\"\"\n target_language = get_object_or_404(\n self.request.region.language_tree_nodes,\n language__slug=kwargs.get(\"language_slug\"),\n parent__isnull=False,\n ).language\n\n xliff_file_url = pages_to_xliff_file(\n request, self.get_queryset(), target_language, only_public=self.only_public\n )\n if xliff_file_url:\n # Insert link with automatic download into success message\n messages.success(\n request,\n __(\n _(\n \"XLIFF file with published pages only for translation to {} successfully created.\"\n ).format(target_language)\n if self.only_public\n else _(\n \"XLIFF file with unpublished and published pages for translation to {} successfully created.\"\n ).format(target_language),\n _(\n \"If the download does not start automatically, please click {}here{}.\"\n ).format(\n f\"<a data-auto-download href='{xliff_file_url}' class='font-bold underline hover:no-underline' download>\",\n \"</a>\",\n ),\n ),\n )\n\n # Let the base view handle the redirect\n return super().post(request, *args, **kwargs)\n", "path": 
"integreat_cms/cms/views/pages/page_bulk_actions.py"}]}
1,518
199
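The integreat-cms patch addresses the misclick problem purely through clearer feedback text keyed on the pipeline's `only_public` flag. A stripped-down sketch of that message selection (a plain function, without Django's translation machinery or lazy strings):

```python
# Sketch of the conditional wording introduced by the patch.
def export_success_message(target_language: str, only_public: bool) -> str:
    if only_public:
        return (f"XLIFF file with published pages only for translation "
                f"to {target_language} successfully created.")
    return (f"XLIFF file with unpublished and published pages for "
            f"translation to {target_language} successfully created.")

print(export_success_message("German", only_public=True))
print(export_success_message("German", only_public=False))
```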
gh_patches_debug_40548
rasdani/github-patches
git_diff
huggingface__huggingface_hub-285
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add sentence similarity for spaCy Computing similarity is simple with `spaCy` and there is an existing [template pipeline](https://github.com/huggingface/huggingface_hub/blob/main/api-inference-community/docker_images/common/app/pipelines/sentence_similarity.py) for sentence similarity at which can be modified for this. https://spacy.io/api/doc#similarity --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py` Content: ``` 1 from typing import Dict, List, Union 2 3 from app.pipelines import Pipeline 4 5 6 class SentenceSimilarityPipeline(Pipeline): 7 def __init__( 8 self, 9 model_id: str, 10 ): 11 # IMPLEMENT_THIS 12 # Preload all the elements you are going to need at inference. 13 # For instance your model, processors, tokenizer that might be needed. 14 # This function is only called once, so do all the heavy processing I/O here 15 raise NotImplementedError( 16 "Please implement SentenceSimilarityPipeline __init__ function" 17 ) 18 19 def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> List[float]: 20 """ 21 Args: 22 inputs (:obj:`dict`): 23 a dictionary containing two keys, 'source_sentence' mapping 24 to the sentence that will be compared against all the others, 25 and 'sentences', mapping to a list of strings to which the 26 source will be compared. 27 Return: 28 A :obj:`list` of floats: Some similarity measure between `source_sentence` and each sentence from `sentences`. 29 """ 30 # IMPLEMENT_THIS 31 raise NotImplementedError( 32 "Please implement SentenceSimilarityPipeline __call__ function" 33 ) 34 ``` Path: `api-inference-community/docker_images/spacy/app/main.py` Content: ``` 1 import functools 2 import logging 3 import os 4 from typing import Dict, Type 5 6 from api_inference_community.routes import pipeline_route, status_ok 7 from app.pipelines import ( 8 Pipeline, 9 TextClassificationPipeline, 10 TokenClassificationPipeline, 11 ) 12 from starlette.applications import Starlette 13 from starlette.middleware import Middleware 14 from starlette.middleware.gzip import GZipMiddleware 15 from starlette.routing import Route 16 17 18 TASK = os.getenv("TASK") 19 MODEL_ID = os.getenv("MODEL_ID") 20 21 22 logger = logging.getLogger(__name__) 23 24 25 # Add the allowed tasks 26 # Supported tasks are: 27 # - text-generation 28 # - text-classification 29 # - token-classification 30 # - translation 31 # - summarization 32 # - automatic-speech-recognition 33 # - ... 34 # For instance 35 # from app.pipelines import AutomaticSpeechRecognitionPipeline 36 # ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline} 37 # You can check the requirements and expectations of each pipelines in their respective 38 # directories. Implement directly within the directories. 
39 ALLOWED_TASKS: Dict[str, Type[Pipeline]] = { 40 "token-classification": TokenClassificationPipeline, 41 "text-classification": TextClassificationPipeline, 42 } 43 44 45 @functools.lru_cache() 46 def get_pipeline() -> Pipeline: 47 task = os.environ["TASK"] 48 model_id = os.environ["MODEL_ID"] 49 if task not in ALLOWED_TASKS: 50 raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}") 51 return ALLOWED_TASKS[task](model_id) 52 53 54 routes = [ 55 Route("/{whatever:path}", status_ok), 56 Route("/{whatever:path}", pipeline_route, methods=["POST"]), 57 ] 58 59 middleware = [Middleware(GZipMiddleware, minimum_size=1000)] 60 if os.environ.get("DEBUG", "") == "1": 61 from starlette.middleware.cors import CORSMiddleware 62 63 middleware.append( 64 Middleware( 65 CORSMiddleware, 66 allow_origins=["*"], 67 allow_headers=["*"], 68 allow_methods=["*"], 69 ) 70 ) 71 72 app = Starlette(routes=routes, middleware=middleware) 73 74 75 @app.on_event("startup") 76 async def startup_event(): 77 logger = logging.getLogger("uvicorn.access") 78 handler = logging.StreamHandler() 79 handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")) 80 logger.handlers = [handler] 81 82 # Link between `api-inference-community` and framework code. 83 app.get_pipeline = get_pipeline 84 try: 85 get_pipeline() 86 except Exception: 87 # We can fail so we can show exception later. 88 pass 89 90 91 if __name__ == "__main__": 92 try: 93 get_pipeline() 94 except Exception: 95 # We can fail so we can show exception later. 96 pass 97 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/api-inference-community/docker_images/spacy/app/main.py b/api-inference-community/docker_images/spacy/app/main.py --- a/api-inference-community/docker_images/spacy/app/main.py +++ b/api-inference-community/docker_images/spacy/app/main.py @@ -6,6 +6,7 @@ from api_inference_community.routes import pipeline_route, status_ok from app.pipelines import ( Pipeline, + SentenceSimilarityPipeline, TextClassificationPipeline, TokenClassificationPipeline, ) @@ -30,6 +31,7 @@ # - translation # - summarization # - automatic-speech-recognition +# - sentence-similarity # - ... # For instance # from app.pipelines import AutomaticSpeechRecognitionPipeline @@ -39,6 +41,7 @@ ALLOWED_TASKS: Dict[str, Type[Pipeline]] = { "token-classification": TokenClassificationPipeline, "text-classification": TextClassificationPipeline, + "sentence-similarity": SentenceSimilarityPipeline, } diff --git a/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py b/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py --- a/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py +++ b/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py @@ -1,3 +1,6 @@ +import os +import subprocess +import sys from typing import Dict, List, Union from app.pipelines import Pipeline @@ -8,14 +11,23 @@ self, model_id: str, ): - # IMPLEMENT_THIS - # Preload all the elements you are going to need at inference. - # For instance your model, processors, tokenizer that might be needed. - # This function is only called once, so do all the heavy processing I/O here - raise NotImplementedError( - "Please implement SentenceSimilarityPipeline __init__ function" + # At the time, only public models from spaCy are allowed in the inference API. + full_model_path = model_id.split("/") + if len(full_model_path) != 2: + raise ValueError( + f"Invalid model_id: {model_id}. It should have a namespace (:namespace:/:model_name:)" + ) + namespace, model_name = full_model_path + package = f"https://huggingface.co/{namespace}/{model_name}/resolve/main/{model_name}-any-py3-none-any.whl" + cache_dir = os.environ["PIP_CACHE"] + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "--cache-dir", cache_dir, package] ) + import spacy + + self.model = spacy.load(model_name) + def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> List[float]: """ Args: @@ -27,7 +39,12 @@ Return: A :obj:`list` of floats: Some similarity measure between `source_sentence` and each sentence from `sentences`. """ - # IMPLEMENT_THIS - raise NotImplementedError( - "Please implement SentenceSimilarityPipeline __call__ function" - ) + source_sentence = inputs["source_sentence"] + source_doc = self.model(source_sentence) + + similarities = [] + for sentence in inputs["sentences"]: + search_doc = self.model(sentence) + similarities.append(source_doc.similarity(search_doc)) + + return similarities
{"golden_diff": "diff --git a/api-inference-community/docker_images/spacy/app/main.py b/api-inference-community/docker_images/spacy/app/main.py\n--- a/api-inference-community/docker_images/spacy/app/main.py\n+++ b/api-inference-community/docker_images/spacy/app/main.py\n@@ -6,6 +6,7 @@\n from api_inference_community.routes import pipeline_route, status_ok\n from app.pipelines import (\n Pipeline,\n+ SentenceSimilarityPipeline,\n TextClassificationPipeline,\n TokenClassificationPipeline,\n )\n@@ -30,6 +31,7 @@\n # - translation\n # - summarization\n # - automatic-speech-recognition\n+# - sentence-similarity\n # - ...\n # For instance\n # from app.pipelines import AutomaticSpeechRecognitionPipeline\n@@ -39,6 +41,7 @@\n ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {\n \"token-classification\": TokenClassificationPipeline,\n \"text-classification\": TextClassificationPipeline,\n+ \"sentence-similarity\": SentenceSimilarityPipeline,\n }\n \n \ndiff --git a/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py b/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py\n--- a/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py\n+++ b/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py\n@@ -1,3 +1,6 @@\n+import os\n+import subprocess\n+import sys\n from typing import Dict, List, Union\n \n from app.pipelines import Pipeline\n@@ -8,14 +11,23 @@\n self,\n model_id: str,\n ):\n- # IMPLEMENT_THIS\n- # Preload all the elements you are going to need at inference.\n- # For instance your model, processors, tokenizer that might be needed.\n- # This function is only called once, so do all the heavy processing I/O here\n- raise NotImplementedError(\n- \"Please implement SentenceSimilarityPipeline __init__ function\"\n+ # At the time, only public models from spaCy are allowed in the inference API.\n+ full_model_path = model_id.split(\"/\")\n+ if len(full_model_path) != 2:\n+ raise ValueError(\n+ f\"Invalid model_id: {model_id}. It should have a namespace (:namespace:/:model_name:)\"\n+ )\n+ namespace, model_name = full_model_path\n+ package = f\"https://huggingface.co/{namespace}/{model_name}/resolve/main/{model_name}-any-py3-none-any.whl\"\n+ cache_dir = os.environ[\"PIP_CACHE\"]\n+ subprocess.check_call(\n+ [sys.executable, \"-m\", \"pip\", \"install\", \"--cache-dir\", cache_dir, package]\n )\n \n+ import spacy\n+\n+ self.model = spacy.load(model_name)\n+\n def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> List[float]:\n \"\"\"\n Args:\n@@ -27,7 +39,12 @@\n Return:\n A :obj:`list` of floats: Some similarity measure between `source_sentence` and each sentence from `sentences`.\n \"\"\"\n- # IMPLEMENT_THIS\n- raise NotImplementedError(\n- \"Please implement SentenceSimilarityPipeline __call__ function\"\n- )\n+ source_sentence = inputs[\"source_sentence\"]\n+ source_doc = self.model(source_sentence)\n+\n+ similarities = []\n+ for sentence in inputs[\"sentences\"]:\n+ search_doc = self.model(sentence)\n+ similarities.append(source_doc.similarity(search_doc))\n+\n+ return similarities\n", "issue": "Add sentence similarity for spaCy\nComputing similarity is simple with `spaCy` and there is an existing [template pipeline](https://github.com/huggingface/huggingface_hub/blob/main/api-inference-community/docker_images/common/app/pipelines/sentence_similarity.py) for sentence similarity at which can be modified for this. 
\r\n\r\nhttps://spacy.io/api/doc#similarity\n", "before_files": [{"content": "from typing import Dict, List, Union\n\nfrom app.pipelines import Pipeline\n\n\nclass SentenceSimilarityPipeline(Pipeline):\n def __init__(\n self,\n model_id: str,\n ):\n # IMPLEMENT_THIS\n # Preload all the elements you are going to need at inference.\n # For instance your model, processors, tokenizer that might be needed.\n # This function is only called once, so do all the heavy processing I/O here\n raise NotImplementedError(\n \"Please implement SentenceSimilarityPipeline __init__ function\"\n )\n\n def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> List[float]:\n \"\"\"\n Args:\n inputs (:obj:`dict`):\n a dictionary containing two keys, 'source_sentence' mapping\n to the sentence that will be compared against all the others,\n and 'sentences', mapping to a list of strings to which the\n source will be compared.\n Return:\n A :obj:`list` of floats: Some similarity measure between `source_sentence` and each sentence from `sentences`.\n \"\"\"\n # IMPLEMENT_THIS\n raise NotImplementedError(\n \"Please implement SentenceSimilarityPipeline __call__ function\"\n )\n", "path": "api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py"}, {"content": "import functools\nimport logging\nimport os\nfrom typing import Dict, Type\n\nfrom api_inference_community.routes import pipeline_route, status_ok\nfrom app.pipelines import (\n Pipeline,\n TextClassificationPipeline,\n TokenClassificationPipeline,\n)\nfrom starlette.applications import Starlette\nfrom starlette.middleware import Middleware\nfrom starlette.middleware.gzip import GZipMiddleware\nfrom starlette.routing import Route\n\n\nTASK = os.getenv(\"TASK\")\nMODEL_ID = os.getenv(\"MODEL_ID\")\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Add the allowed tasks\n# Supported tasks are:\n# - text-generation\n# - text-classification\n# - token-classification\n# - translation\n# - summarization\n# - automatic-speech-recognition\n# - ...\n# For instance\n# from app.pipelines import AutomaticSpeechRecognitionPipeline\n# ALLOWED_TASKS = {\"automatic-speech-recognition\": AutomaticSpeechRecognitionPipeline}\n# You can check the requirements and expectations of each pipelines in their respective\n# directories. 
Implement directly within the directories.\nALLOWED_TASKS: Dict[str, Type[Pipeline]] = {\n \"token-classification\": TokenClassificationPipeline,\n \"text-classification\": TextClassificationPipeline,\n}\n\n\n@functools.lru_cache()\ndef get_pipeline() -> Pipeline:\n task = os.environ[\"TASK\"]\n model_id = os.environ[\"MODEL_ID\"]\n if task not in ALLOWED_TASKS:\n raise EnvironmentError(f\"{task} is not a valid pipeline for model : {model_id}\")\n return ALLOWED_TASKS[task](model_id)\n\n\nroutes = [\n Route(\"/{whatever:path}\", status_ok),\n Route(\"/{whatever:path}\", pipeline_route, methods=[\"POST\"]),\n]\n\nmiddleware = [Middleware(GZipMiddleware, minimum_size=1000)]\nif os.environ.get(\"DEBUG\", \"\") == \"1\":\n from starlette.middleware.cors import CORSMiddleware\n\n middleware.append(\n Middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_headers=[\"*\"],\n allow_methods=[\"*\"],\n )\n )\n\napp = Starlette(routes=routes, middleware=middleware)\n\n\n@app.on_event(\"startup\")\nasync def startup_event():\n logger = logging.getLogger(\"uvicorn.access\")\n handler = logging.StreamHandler()\n handler.setFormatter(logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\"))\n logger.handlers = [handler]\n\n # Link between `api-inference-community` and framework code.\n app.get_pipeline = get_pipeline\n try:\n get_pipeline()\n except Exception:\n # We can fail so we can show exception later.\n pass\n\n\nif __name__ == \"__main__\":\n try:\n get_pipeline()\n except Exception:\n # We can fail so we can show exception later.\n pass\n", "path": "api-inference-community/docker_images/spacy/app/main.py"}], "after_files": [{"content": "import os\nimport subprocess\nimport sys\nfrom typing import Dict, List, Union\n\nfrom app.pipelines import Pipeline\n\n\nclass SentenceSimilarityPipeline(Pipeline):\n def __init__(\n self,\n model_id: str,\n ):\n # At the time, only public models from spaCy are allowed in the inference API.\n full_model_path = model_id.split(\"/\")\n if len(full_model_path) != 2:\n raise ValueError(\n f\"Invalid model_id: {model_id}. 
It should have a namespace (:namespace:/:model_name:)\"\n )\n namespace, model_name = full_model_path\n package = f\"https://huggingface.co/{namespace}/{model_name}/resolve/main/{model_name}-any-py3-none-any.whl\"\n cache_dir = os.environ[\"PIP_CACHE\"]\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"install\", \"--cache-dir\", cache_dir, package]\n )\n\n import spacy\n\n self.model = spacy.load(model_name)\n\n def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> List[float]:\n \"\"\"\n Args:\n inputs (:obj:`dict`):\n a dictionary containing two keys, 'source_sentence' mapping\n to the sentence that will be compared against all the others,\n and 'sentences', mapping to a list of strings to which the\n source will be compared.\n Return:\n A :obj:`list` of floats: Some similarity measure between `source_sentence` and each sentence from `sentences`.\n \"\"\"\n source_sentence = inputs[\"source_sentence\"]\n source_doc = self.model(source_sentence)\n\n similarities = []\n for sentence in inputs[\"sentences\"]:\n search_doc = self.model(sentence)\n similarities.append(source_doc.similarity(search_doc))\n\n return similarities\n", "path": "api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py"}, {"content": "import functools\nimport logging\nimport os\nfrom typing import Dict, Type\n\nfrom api_inference_community.routes import pipeline_route, status_ok\nfrom app.pipelines import (\n Pipeline,\n SentenceSimilarityPipeline,\n TextClassificationPipeline,\n TokenClassificationPipeline,\n)\nfrom starlette.applications import Starlette\nfrom starlette.middleware import Middleware\nfrom starlette.middleware.gzip import GZipMiddleware\nfrom starlette.routing import Route\n\n\nTASK = os.getenv(\"TASK\")\nMODEL_ID = os.getenv(\"MODEL_ID\")\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Add the allowed tasks\n# Supported tasks are:\n# - text-generation\n# - text-classification\n# - token-classification\n# - translation\n# - summarization\n# - automatic-speech-recognition\n# - sentence-similarity\n# - ...\n# For instance\n# from app.pipelines import AutomaticSpeechRecognitionPipeline\n# ALLOWED_TASKS = {\"automatic-speech-recognition\": AutomaticSpeechRecognitionPipeline}\n# You can check the requirements and expectations of each pipelines in their respective\n# directories. 
Implement directly within the directories.\nALLOWED_TASKS: Dict[str, Type[Pipeline]] = {\n \"token-classification\": TokenClassificationPipeline,\n \"text-classification\": TextClassificationPipeline,\n \"sentence-similarity\": SentenceSimilarityPipeline,\n}\n\n\n@functools.lru_cache()\ndef get_pipeline() -> Pipeline:\n task = os.environ[\"TASK\"]\n model_id = os.environ[\"MODEL_ID\"]\n if task not in ALLOWED_TASKS:\n raise EnvironmentError(f\"{task} is not a valid pipeline for model : {model_id}\")\n return ALLOWED_TASKS[task](model_id)\n\n\nroutes = [\n Route(\"/{whatever:path}\", status_ok),\n Route(\"/{whatever:path}\", pipeline_route, methods=[\"POST\"]),\n]\n\nmiddleware = [Middleware(GZipMiddleware, minimum_size=1000)]\nif os.environ.get(\"DEBUG\", \"\") == \"1\":\n from starlette.middleware.cors import CORSMiddleware\n\n middleware.append(\n Middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_headers=[\"*\"],\n allow_methods=[\"*\"],\n )\n )\n\napp = Starlette(routes=routes, middleware=middleware)\n\n\n@app.on_event(\"startup\")\nasync def startup_event():\n logger = logging.getLogger(\"uvicorn.access\")\n handler = logging.StreamHandler()\n handler.setFormatter(logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\"))\n logger.handlers = [handler]\n\n # Link between `api-inference-community` and framework code.\n app.get_pipeline = get_pipeline\n try:\n get_pipeline()\n except Exception:\n # We can fail so we can show exception later.\n pass\n\n\nif __name__ == \"__main__\":\n try:\n get_pipeline()\n except Exception:\n # We can fail so we can show exception later.\n pass\n", "path": "api-inference-community/docker_images/spacy/app/main.py"}]}
1458
776
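For quick reference, the spaCy fix captured in the record above reduces to a short similarity loop over `Doc.similarity`. The sketch below is an illustration only, assuming a spaCy model with word vectors is installed; the model name `en_core_web_md` is a hypothetical choice, not taken from the record.

```python
# Sketch of the similarity logic from the record's after_files, under the
# stated assumptions (a vectorized spaCy model such as "en_core_web_md").
import spacy

def sentence_similarity(nlp, source_sentence, sentences):
    # Doc.similarity returns a cosine-style score over the model's vectors.
    source_doc = nlp(source_sentence)
    return [source_doc.similarity(nlp(sentence)) for sentence in sentences]

nlp = spacy.load("en_core_web_md")  # hypothetical model, must be installed
print(sentence_similarity(nlp, "I like cats", ["I like dogs", "The sky is blue"]))
```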
gh_patches_debug_29797
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-1953
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error handler should not capture exceptions in DEBUG mode ## Description Based on the [conclusion from the discussion](https://github.com/centerofci/mathesar/discussions/1150). Improve backend's debugability via two changes: 1. when backend is in debug-mode, add a pretty-printed stacktrace alongside the current error JSON; 2. stop wrapping unhandled exceptions in 4999 errors. For some context, we're catching backend exceptions and wrapping them in custom errors so as to provide a uniform error interface to API clients. As it is now it has detrimental effects on debugging, because the resulting error messages don't include stacktraces. Above changes aim to address that. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mathesar/exception_handlers.py` Content: ``` 1 import warnings 2 3 from django.conf import settings 4 from django.db import IntegrityError as DjangoIntegrityError 5 from django.utils.encoding import force_str 6 from rest_framework.views import exception_handler 7 from rest_framework_friendly_errors.settings import FRIENDLY_EXCEPTION_DICT 8 from sqlalchemy.exc import IntegrityError, ProgrammingError 9 10 from db.types.exceptions import UnsupportedTypeException 11 from mathesar.api.exceptions.database_exceptions import ( 12 base_exceptions as base_api_exceptions, 13 exceptions as database_api_exceptions, 14 ) 15 from mathesar.api.exceptions.data_import_exceptions import exceptions as data_import_api_exceptions 16 from mathesar.api.exceptions.error_codes import ErrorCodes 17 from mathesar.api.exceptions.exception_mappers import integrity_error_mapper 18 from mathesar.api.exceptions.generic_exceptions.base_exceptions import get_default_api_exception 19 from mathesar.errors import URLDownloadError, URLNotReachable, URLInvalidContentTypeError 20 21 exception_map = { 22 IntegrityError: integrity_error_mapper, 23 DjangoIntegrityError: integrity_error_mapper, 24 UnsupportedTypeException: lambda exc: database_api_exceptions.UnsupportedTypeAPIException(exc), 25 ProgrammingError: lambda exc: base_api_exceptions.ProgrammingAPIException(exc), 26 URLDownloadError: lambda exc: data_import_api_exceptions.URLDownloadErrorAPIException(exc), 27 URLNotReachable: lambda exc: data_import_api_exceptions.URLNotReachableAPIException(exc), 28 URLInvalidContentTypeError: lambda exc: data_import_api_exceptions.URLInvalidContentTypeAPIException(exc) 29 } 30 31 32 def standardize_error_response(data): 33 for index, error in enumerate(data): 34 if 'code' in error: 35 if error['code'] is not None and str(error['code']) != 'None': 36 data[index]['code'] = int(error['code']) 37 else: 38 data[index]['code'] = ErrorCodes.UnknownError.value 39 if 'detail' not in error: 40 data[index]['detail'] = error.pop('details', {}) 41 return data 42 43 44 def mathesar_exception_handler(exc, context): 45 response = exception_handler(exc, context) 46 # DRF default exception handler does not handle non API errors, 47 # So we convert it to proper API response 48 if not response: 49 # Check if we have an equivalent API exception that is able to convert the exception to proper error 50 mapped_exception_class = exception_map.get(exc.__class__) 51 if mapped_exception_class is None: 52 if getattr(settings, 'MATHESAR_CAPTURE_UNHANDLED_EXCEPTION', False): 53 mapped_exception_class = get_default_api_exception 54 else: 55 raise exc 
56 api_exception = mapped_exception_class(exc) 57 response = exception_handler(api_exception, context) 58 59 if response is not None: 60 # Check if conforms to the api spec 61 if is_pretty(response.data): 62 # Validation exception converts error_codes from integer to string, we need to convert it back into 63 response.data = standardize_error_response(response.data) 64 return response 65 # Certain error raised by drf automatically don't follow the api error spec, 66 # so we convert those into proper format 67 else: 68 warnings.warn("Error Response does not conform to the api spec. Please handle the exception properly") 69 error_code = FRIENDLY_EXCEPTION_DICT.get( 70 exc.__class__.__name__, None 71 ) 72 if error_code is None and settings.MATHESAR_MODE != "PRODUCTION": 73 raise Exception("Error Response does not conform to the api spec. Please handle the exception properly") 74 75 # Errors thrown by drf automatically 76 if isinstance(response.data, dict): 77 error_message = response.data.pop('detail', '') 78 79 response_data = {} 80 response_data['code'] = error_code 81 response_data['message'] = error_message 82 response_data['details'] = {'exception': force_str(exc)} 83 response.data = [response_data] 84 return response 85 86 87 def is_pretty(data): 88 if not isinstance(data, list): 89 return False 90 else: 91 for error_details in data: 92 if ( 93 not isinstance(error_details, dict) 94 or 'code' not in error_details 95 or 'message' not in error_details 96 ): 97 return False 98 return True 99 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mathesar/exception_handlers.py b/mathesar/exception_handlers.py --- a/mathesar/exception_handlers.py +++ b/mathesar/exception_handlers.py @@ -1,4 +1,5 @@ import warnings +import traceback from django.conf import settings from django.db import IntegrityError as DjangoIntegrityError @@ -38,6 +39,9 @@ data[index]['code'] = ErrorCodes.UnknownError.value if 'detail' not in error: data[index]['detail'] = error.pop('details', {}) + # Adds a Stack-trace of the error for better debugging + if settings.MATHESAR_MODE == 'DEVELOPMENT': + data[index]['stacktrace'] = reformat_stacktrace(traceback.format_exc()) return data @@ -80,6 +84,9 @@ response_data['code'] = error_code response_data['message'] = error_message response_data['details'] = {'exception': force_str(exc)} + # Stacktrace should only be returned if MATHESAR_MODE is set to DEVELOPMENT + if settings.MATHESAR_MODE == 'DEVELOPMENT': + response_data['stacktrace'] = reformat_stacktrace(traceback.format_exc()) response.data = [response_data] return response @@ -96,3 +103,8 @@ ): return False return True + + +def reformat_stacktrace(stacktrace): + stacktrace_list = stacktrace.splitlines()[1:] + return [f'{i + 1}. {line.strip()}' for i, line in enumerate(stacktrace_list)]
{"golden_diff": "diff --git a/mathesar/exception_handlers.py b/mathesar/exception_handlers.py\n--- a/mathesar/exception_handlers.py\n+++ b/mathesar/exception_handlers.py\n@@ -1,4 +1,5 @@\n import warnings\n+import traceback\n \n from django.conf import settings\n from django.db import IntegrityError as DjangoIntegrityError\n@@ -38,6 +39,9 @@\n data[index]['code'] = ErrorCodes.UnknownError.value\n if 'detail' not in error:\n data[index]['detail'] = error.pop('details', {})\n+ # Adds a Stack-trace of the error for better debugging\n+ if settings.MATHESAR_MODE == 'DEVELOPMENT':\n+ data[index]['stacktrace'] = reformat_stacktrace(traceback.format_exc())\n return data\n \n \n@@ -80,6 +84,9 @@\n response_data['code'] = error_code\n response_data['message'] = error_message\n response_data['details'] = {'exception': force_str(exc)}\n+ # Stacktrace should only be returned if MATHESAR_MODE is set to DEVELOPMENT\n+ if settings.MATHESAR_MODE == 'DEVELOPMENT':\n+ response_data['stacktrace'] = reformat_stacktrace(traceback.format_exc())\n response.data = [response_data]\n return response\n \n@@ -96,3 +103,8 @@\n ):\n return False\n return True\n+\n+\n+def reformat_stacktrace(stacktrace):\n+ stacktrace_list = stacktrace.splitlines()[1:]\n+ return [f'{i + 1}. {line.strip()}' for i, line in enumerate(stacktrace_list)]\n", "issue": "Error handler should not capture exceptions in DEBUG mode\n## Description\r\nBased on the [conclusion from the discussion](https://github.com/centerofci/mathesar/discussions/1150).\r\n\r\nImprove backend's debugability via two changes:\r\n1. when backend is in debug-mode, add a pretty-printed stacktrace alongside the current error JSON;\r\n2. stop wrapping unhandled exceptions in 4999 errors.\r\n\r\nFor some context, we're catching backend exceptions and wrapping them in custom errors so as to provide a uniform error interface to API clients. As it is now it has detrimental effects on debugging, because the resulting error messages don't include stacktraces. 
Above changes aim to address that.\n", "before_files": [{"content": "import warnings\n\nfrom django.conf import settings\nfrom django.db import IntegrityError as DjangoIntegrityError\nfrom django.utils.encoding import force_str\nfrom rest_framework.views import exception_handler\nfrom rest_framework_friendly_errors.settings import FRIENDLY_EXCEPTION_DICT\nfrom sqlalchemy.exc import IntegrityError, ProgrammingError\n\nfrom db.types.exceptions import UnsupportedTypeException\nfrom mathesar.api.exceptions.database_exceptions import (\n base_exceptions as base_api_exceptions,\n exceptions as database_api_exceptions,\n)\nfrom mathesar.api.exceptions.data_import_exceptions import exceptions as data_import_api_exceptions\nfrom mathesar.api.exceptions.error_codes import ErrorCodes\nfrom mathesar.api.exceptions.exception_mappers import integrity_error_mapper\nfrom mathesar.api.exceptions.generic_exceptions.base_exceptions import get_default_api_exception\nfrom mathesar.errors import URLDownloadError, URLNotReachable, URLInvalidContentTypeError\n\nexception_map = {\n IntegrityError: integrity_error_mapper,\n DjangoIntegrityError: integrity_error_mapper,\n UnsupportedTypeException: lambda exc: database_api_exceptions.UnsupportedTypeAPIException(exc),\n ProgrammingError: lambda exc: base_api_exceptions.ProgrammingAPIException(exc),\n URLDownloadError: lambda exc: data_import_api_exceptions.URLDownloadErrorAPIException(exc),\n URLNotReachable: lambda exc: data_import_api_exceptions.URLNotReachableAPIException(exc),\n URLInvalidContentTypeError: lambda exc: data_import_api_exceptions.URLInvalidContentTypeAPIException(exc)\n}\n\n\ndef standardize_error_response(data):\n for index, error in enumerate(data):\n if 'code' in error:\n if error['code'] is not None and str(error['code']) != 'None':\n data[index]['code'] = int(error['code'])\n else:\n data[index]['code'] = ErrorCodes.UnknownError.value\n if 'detail' not in error:\n data[index]['detail'] = error.pop('details', {})\n return data\n\n\ndef mathesar_exception_handler(exc, context):\n response = exception_handler(exc, context)\n # DRF default exception handler does not handle non API errors,\n # So we convert it to proper API response\n if not response:\n # Check if we have an equivalent API exception that is able to convert the exception to proper error\n mapped_exception_class = exception_map.get(exc.__class__)\n if mapped_exception_class is None:\n if getattr(settings, 'MATHESAR_CAPTURE_UNHANDLED_EXCEPTION', False):\n mapped_exception_class = get_default_api_exception\n else:\n raise exc\n api_exception = mapped_exception_class(exc)\n response = exception_handler(api_exception, context)\n\n if response is not None:\n # Check if conforms to the api spec\n if is_pretty(response.data):\n # Validation exception converts error_codes from integer to string, we need to convert it back into\n response.data = standardize_error_response(response.data)\n return response\n # Certain error raised by drf automatically don't follow the api error spec,\n # so we convert those into proper format\n else:\n warnings.warn(\"Error Response does not conform to the api spec. Please handle the exception properly\")\n error_code = FRIENDLY_EXCEPTION_DICT.get(\n exc.__class__.__name__, None\n )\n if error_code is None and settings.MATHESAR_MODE != \"PRODUCTION\":\n raise Exception(\"Error Response does not conform to the api spec. 
Please handle the exception properly\")\n\n # Errors thrown by drf automatically\n if isinstance(response.data, dict):\n error_message = response.data.pop('detail', '')\n\n response_data = {}\n response_data['code'] = error_code\n response_data['message'] = error_message\n response_data['details'] = {'exception': force_str(exc)}\n response.data = [response_data]\n return response\n\n\ndef is_pretty(data):\n if not isinstance(data, list):\n return False\n else:\n for error_details in data:\n if (\n not isinstance(error_details, dict)\n or 'code' not in error_details\n or 'message' not in error_details\n ):\n return False\n return True\n", "path": "mathesar/exception_handlers.py"}], "after_files": [{"content": "import warnings\nimport traceback\n\nfrom django.conf import settings\nfrom django.db import IntegrityError as DjangoIntegrityError\nfrom django.utils.encoding import force_str\nfrom rest_framework.views import exception_handler\nfrom rest_framework_friendly_errors.settings import FRIENDLY_EXCEPTION_DICT\nfrom sqlalchemy.exc import IntegrityError, ProgrammingError\n\nfrom db.types.exceptions import UnsupportedTypeException\nfrom mathesar.api.exceptions.database_exceptions import (\n base_exceptions as base_api_exceptions,\n exceptions as database_api_exceptions,\n)\nfrom mathesar.api.exceptions.data_import_exceptions import exceptions as data_import_api_exceptions\nfrom mathesar.api.exceptions.error_codes import ErrorCodes\nfrom mathesar.api.exceptions.exception_mappers import integrity_error_mapper\nfrom mathesar.api.exceptions.generic_exceptions.base_exceptions import get_default_api_exception\nfrom mathesar.errors import URLDownloadError, URLNotReachable, URLInvalidContentTypeError\n\nexception_map = {\n IntegrityError: integrity_error_mapper,\n DjangoIntegrityError: integrity_error_mapper,\n UnsupportedTypeException: lambda exc: database_api_exceptions.UnsupportedTypeAPIException(exc),\n ProgrammingError: lambda exc: base_api_exceptions.ProgrammingAPIException(exc),\n URLDownloadError: lambda exc: data_import_api_exceptions.URLDownloadErrorAPIException(exc),\n URLNotReachable: lambda exc: data_import_api_exceptions.URLNotReachableAPIException(exc),\n URLInvalidContentTypeError: lambda exc: data_import_api_exceptions.URLInvalidContentTypeAPIException(exc)\n}\n\n\ndef standardize_error_response(data):\n for index, error in enumerate(data):\n if 'code' in error:\n if error['code'] is not None and str(error['code']) != 'None':\n data[index]['code'] = int(error['code'])\n else:\n data[index]['code'] = ErrorCodes.UnknownError.value\n if 'detail' not in error:\n data[index]['detail'] = error.pop('details', {})\n # Adds a Stack-trace of the error for better debugging\n if settings.MATHESAR_MODE == 'DEVELOPMENT':\n data[index]['stacktrace'] = reformat_stacktrace(traceback.format_exc())\n return data\n\n\ndef mathesar_exception_handler(exc, context):\n response = exception_handler(exc, context)\n # DRF default exception handler does not handle non API errors,\n # So we convert it to proper API response\n if not response:\n # Check if we have an equivalent API exception that is able to convert the exception to proper error\n mapped_exception_class = exception_map.get(exc.__class__)\n if mapped_exception_class is None:\n if getattr(settings, 'MATHESAR_CAPTURE_UNHANDLED_EXCEPTION', False):\n mapped_exception_class = get_default_api_exception\n else:\n raise exc\n api_exception = mapped_exception_class(exc)\n response = exception_handler(api_exception, context)\n\n if response is not None:\n # 
Check if conforms to the api spec\n if is_pretty(response.data):\n # Validation exception converts error_codes from integer to string, we need to convert it back into\n response.data = standardize_error_response(response.data)\n return response\n # Certain error raised by drf automatically don't follow the api error spec,\n # so we convert those into proper format\n else:\n warnings.warn(\"Error Response does not conform to the api spec. Please handle the exception properly\")\n error_code = FRIENDLY_EXCEPTION_DICT.get(\n exc.__class__.__name__, None\n )\n if error_code is None and settings.MATHESAR_MODE != \"PRODUCTION\":\n raise Exception(\"Error Response does not conform to the api spec. Please handle the exception properly\")\n\n # Errors thrown by drf automatically\n if isinstance(response.data, dict):\n error_message = response.data.pop('detail', '')\n\n response_data = {}\n response_data['code'] = error_code\n response_data['message'] = error_message\n response_data['details'] = {'exception': force_str(exc)}\n # Stacktrace should only be returned if MATHESAR_MODE is set to DEVELOPMENT\n if settings.MATHESAR_MODE == 'DEVELOPMENT':\n response_data['stacktrace'] = reformat_stacktrace(traceback.format_exc())\n response.data = [response_data]\n return response\n\n\ndef is_pretty(data):\n if not isinstance(data, list):\n return False\n else:\n for error_details in data:\n if (\n not isinstance(error_details, dict)\n or 'code' not in error_details\n or 'message' not in error_details\n ):\n return False\n return True\n\n\ndef reformat_stacktrace(stacktrace):\n stacktrace_list = stacktrace.splitlines()[1:]\n return [f'{i + 1}. {line.strip()}' for i, line in enumerate(stacktrace_list)]\n", "path": "mathesar/exception_handlers.py"}]}
1448
356
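The core of the mathesar golden diff above is the `reformat_stacktrace` helper, which drops the `Traceback (most recent call last):` header line and numbers the remaining frames. A standalone sketch of that behavior, runnable on its own:

```python
# Standalone sketch of the stacktrace helper added by the golden diff above.
import traceback

def reformat_stacktrace(stacktrace):
    # Drop the "Traceback (most recent call last):" header, number the rest.
    stacktrace_list = stacktrace.splitlines()[1:]
    return [f'{i + 1}. {line.strip()}' for i, line in enumerate(stacktrace_list)]

try:
    1 / 0
except ZeroDivisionError:
    for line in reformat_stacktrace(traceback.format_exc()):
        print(line)
```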
gh_patches_debug_3010
rasdani/github-patches
git_diff
cocotb__cocotb-208
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Redhat 6.5 can no longer raise a TestError Regressions report pass but number of tests has gone done on some simulators. Icarus for instance shows this. ``` 0.00ns INFO  cocotb.gpi gpi_embed.c:213 in embed_sim_init Running on Icarus Verilog version 0.10.0 (devel) 0.00ns INFO  cocotb.gpi gpi_embed.c:214 in embed_sim_init Python interpreter initialised and cocotb loaded! 0.00ns INFO  cocotb.gpi __init__.py:96 in _initialise_testbench Seeding Python random module with 1421853826 0.00ns INFO  cocotb.gpi __init__.py:110 in _initialise_testbench Running tests with Cocotb v0.5a from /var/lib/jenkins/workspace/cocotb_icarus_x86_64 0.00ns ERROR  cocotb.coroutine.fail decorators.py:99 in __init__ test_duplicate_yield isn't a value coroutine! Did you use the yield keyword? Traceback (most recent call last): File "/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/__init__.py", line 128, in _initialise_testbench regression.initialise() File "/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/regression.py", line 123, in initialise test = thing(self._dut) File "/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/decorators.py", line 356, in _wrapped_test raise_error(self, str(e)) File "/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/result.py", line 42, in raise_error if sys.version_info.major >= 3: AttributeError: 'tuple' object has no attribute 'major' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cocotb/result.py` Content: ``` 1 ''' Copyright (c) 2013 Potential Ventures Ltd 2 Copyright (c) 2013 SolarFlare Communications Inc 3 All rights reserved. 4 5 Redistribution and use in source and binary forms, with or without 6 modification, are permitted provided that the following conditions are met: 7 * Redistributions of source code must retain the above copyright 8 notice, this list of conditions and the following disclaimer. 9 * Redistributions in binary form must reproduce the above copyright 10 notice, this list of conditions and the following disclaimer in the 11 documentation and/or other materials provided with the distribution. 12 * Neither the name of Potential Ventures Ltd, 13 SolarFlare Communications Inc nor the 14 names of its contributors may be used to endorse or promote products 15 derived from this software without specific prior written permission. 16 17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY 21 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 24 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ''' 27 28 # TODO: Coule use cStringIO? 
29 import traceback 30 import sys 31 #from StringIO import StringIO 32 from io import StringIO, BytesIO 33 34 def raise_error(obj, msg): 35 """ 36 Creates a TestError exception and raises it after printing a traceback 37 38 obj has a log method 39 msg is a string 40 """ 41 exc_type, exc_value, exc_traceback = sys.exc_info() 42 if sys.version_info.major >= 3: 43 buff = StringIO() 44 traceback.print_tb(exc_traceback, file=buff) 45 else: 46 buff_bytes = BytesIO() 47 traceback.print_tb(exc_traceback, file=buff_bytes) 48 buff = StringIO(buff_bytes.getvalue().decode("UTF-8")) 49 obj.log.error("%s\n%s" % (msg, buff.getvalue())) 50 exception = TestError(msg) 51 exception.stderr.write(buff.getvalue()) 52 raise exception 53 54 def create_error(obj, msg): 55 """ 56 As above, but return the exception rather than raise it, simply to avoid 57 too many levels of nested try/except blocks 58 """ 59 try: 60 raise_error(obj, msg) 61 except TestError as error: 62 return error 63 return TestError("Creating error traceback failed") 64 65 66 class ReturnValue(StopIteration): 67 def __init__(self, retval): 68 self.retval = retval 69 70 class TestComplete(StopIteration): 71 """ 72 Exceptions are used to pass test results around. 73 """ 74 def __init__(self, *args, **kwargs): 75 super(TestComplete, self).__init__(*args, **kwargs) 76 self.stdout = StringIO() 77 self.stderr = StringIO() 78 79 class TestError(TestComplete): pass 80 81 class TestFailure(TestComplete): pass 82 83 class TestSuccess(TestComplete): pass 84 85 class SimFailure(TestComplete): pass 86 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cocotb/result.py b/cocotb/result.py --- a/cocotb/result.py +++ b/cocotb/result.py @@ -39,7 +39,8 @@ msg is a string """ exc_type, exc_value, exc_traceback = sys.exc_info() - if sys.version_info.major >= 3: + # 2.6 cannot use named access + if sys.version_info[0] >= 3: buff = StringIO() traceback.print_tb(exc_traceback, file=buff) else:
{"golden_diff": "diff --git a/cocotb/result.py b/cocotb/result.py\n--- a/cocotb/result.py\n+++ b/cocotb/result.py\n@@ -39,7 +39,8 @@\n msg is a string\n \"\"\"\n exc_type, exc_value, exc_traceback = sys.exc_info()\n- if sys.version_info.major >= 3:\n+ # 2.6 cannot use named access\n+ if sys.version_info[0] >= 3:\n buff = StringIO()\n traceback.print_tb(exc_traceback, file=buff)\n else:\n", "issue": "Redhat 6.5 can no longer raise a TestError\nRegressions report pass but number of tests has gone done on some simulators. Icarus for instance shows this.\n\n```\n0.00ns \u001b[34mINFO \u001b[39m cocotb.gpi gpi_embed.c:213 in embed_sim_init \u001b[34mRunning on Icarus Verilog version 0.10.0 (devel)\u001b[39m\n 0.00ns \u001b[34mINFO \u001b[39m cocotb.gpi gpi_embed.c:214 in embed_sim_init \u001b[34mPython interpreter initialised and cocotb loaded!\u001b[39m\n 0.00ns \u001b[34mINFO \u001b[39m cocotb.gpi __init__.py:96 in _initialise_testbench \u001b[34mSeeding Python random module with 1421853826\u001b[39m\n 0.00ns \u001b[34mINFO \u001b[39m cocotb.gpi __init__.py:110 in _initialise_testbench \u001b[34mRunning tests with Cocotb v0.5a from /var/lib/jenkins/workspace/cocotb_icarus_x86_64\u001b[39m\n 0.00ns \u001b[31mERROR \u001b[39m cocotb.coroutine.fail decorators.py:99 in __init__ \u001b[31mtest_duplicate_yield isn't a value coroutine! Did you use the yield keyword?\u001b[39m\nTraceback (most recent call last):\n File \"/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/__init__.py\", line 128, in _initialise_testbench\n regression.initialise()\n File \"/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/regression.py\", line 123, in initialise\n test = thing(self._dut)\n File \"/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/decorators.py\", line 356, in _wrapped_test\n raise_error(self, str(e))\n File \"/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/result.py\", line 42, in raise_error\n if sys.version_info.major >= 3:\nAttributeError: 'tuple' object has no attribute 'major'\n```\n\n", "before_files": [{"content": "''' Copyright (c) 2013 Potential Ventures Ltd\nCopyright (c) 2013 SolarFlare Communications Inc\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of Potential Ventures Ltd,\n SolarFlare Communications Inc nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''\n\n# TODO: Coule use cStringIO?\nimport traceback\nimport sys\n#from StringIO import StringIO\nfrom io import StringIO, BytesIO\n\ndef raise_error(obj, msg):\n \"\"\"\n Creates a TestError exception and raises it after printing a traceback\n\n obj has a log method\n msg is a string\n \"\"\"\n exc_type, exc_value, exc_traceback = sys.exc_info()\n if sys.version_info.major >= 3:\n buff = StringIO()\n traceback.print_tb(exc_traceback, file=buff)\n else:\n buff_bytes = BytesIO()\n traceback.print_tb(exc_traceback, file=buff_bytes)\n buff = StringIO(buff_bytes.getvalue().decode(\"UTF-8\"))\n obj.log.error(\"%s\\n%s\" % (msg, buff.getvalue()))\n exception = TestError(msg)\n exception.stderr.write(buff.getvalue())\n raise exception\n\ndef create_error(obj, msg):\n \"\"\"\n As above, but return the exception rather than raise it, simply to avoid\n too many levels of nested try/except blocks\n \"\"\"\n try:\n raise_error(obj, msg)\n except TestError as error:\n return error\n return TestError(\"Creating error traceback failed\")\n\n\nclass ReturnValue(StopIteration):\n def __init__(self, retval):\n self.retval = retval\n\nclass TestComplete(StopIteration):\n \"\"\"\n Exceptions are used to pass test results around.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(TestComplete, self).__init__(*args, **kwargs)\n self.stdout = StringIO()\n self.stderr = StringIO()\n\nclass TestError(TestComplete): pass\n\nclass TestFailure(TestComplete): pass\n\nclass TestSuccess(TestComplete): pass\n\nclass SimFailure(TestComplete): pass\n", "path": "cocotb/result.py"}], "after_files": [{"content": "''' Copyright (c) 2013 Potential Ventures Ltd\nCopyright (c) 2013 SolarFlare Communications Inc\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of Potential Ventures Ltd,\n SolarFlare Communications Inc nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''\n\n# TODO: Coule use cStringIO?\nimport traceback\nimport sys\n#from StringIO import StringIO\nfrom io import StringIO, BytesIO\n\ndef raise_error(obj, msg):\n \"\"\"\n Creates a TestError exception and raises it after printing a traceback\n\n obj has a log method\n msg is a string\n \"\"\"\n exc_type, exc_value, exc_traceback = sys.exc_info()\n # 2.6 cannot use named access\n if sys.version_info[0] >= 3:\n buff = StringIO()\n traceback.print_tb(exc_traceback, file=buff)\n else:\n buff_bytes = BytesIO()\n traceback.print_tb(exc_traceback, file=buff_bytes)\n buff = StringIO(buff_bytes.getvalue().decode(\"UTF-8\"))\n obj.log.error(\"%s\\n%s\" % (msg, buff.getvalue()))\n exception = TestError(msg)\n exception.stderr.write(buff.getvalue())\n raise exception\n\ndef create_error(obj, msg):\n \"\"\"\n As above, but return the exception rather than raise it, simply to avoid\n too many levels of nested try/except blocks\n \"\"\"\n try:\n raise_error(obj, msg)\n except TestError as error:\n return error\n return TestError(\"Creating error traceback failed\")\n\n\nclass ReturnValue(StopIteration):\n def __init__(self, retval):\n self.retval = retval\n\nclass TestComplete(StopIteration):\n \"\"\"\n Exceptions are used to pass test results around.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(TestComplete, self).__init__(*args, **kwargs)\n self.stdout = StringIO()\n self.stderr = StringIO()\n\nclass TestError(TestComplete): pass\n\nclass TestFailure(TestComplete): pass\n\nclass TestSuccess(TestComplete): pass\n\nclass SimFailure(TestComplete): pass\n", "path": "cocotb/result.py"}]}
1701
127
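The cocotb fix above hinges on a version-compatibility detail: `sys.version_info` only became a named tuple in Python 2.7, so on the Python 2.6 shipped with RedHat 6.5 the attribute access `sys.version_info.major` raises `AttributeError`, while index access works everywhere. A minimal demonstration:

```python
# Index access into sys.version_info is portable back to Python 2.6,
# where .major does not exist because version_info is a plain tuple.
import sys

if sys.version_info[0] >= 3:  # portable replacement for sys.version_info.major
    print("running on Python 3")
else:
    print("running on Python 2")
```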
gh_patches_debug_19252
rasdani/github-patches
git_diff
Kinto__kinto-658
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [cliquet] Add a way to change the port used when using kinto start. original: https://github.com/mozilla-services/cliquet/issues/686 All in title --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kinto/__main__.py` Content: ``` 1 from __future__ import print_function 2 import argparse 3 import os 4 import sys 5 6 from six.moves import input 7 from kinto.core import scripts 8 from pyramid.scripts import pserve 9 from pyramid.paster import bootstrap 10 from kinto import __version__ 11 from kinto.config import init 12 13 CONFIG_FILE = 'config/kinto.ini' 14 15 16 def main(args=None): 17 """The main routine.""" 18 if args is None: 19 args = sys.argv[1:] 20 21 parser = argparse.ArgumentParser(description="Kinto commands") 22 parser.add_argument('--ini', 23 help='Application configuration file', 24 dest='ini_file', 25 required=False, 26 default=CONFIG_FILE) 27 parser.add_argument('--backend', 28 help='Specify backend', 29 dest='backend', 30 required=False, 31 default=None) 32 33 parser.add_argument('-v', '--version', 34 action='version', version=__version__, 35 help='Print the Kinto version and exit.') 36 37 subparsers = parser.add_subparsers(title='subcommands', 38 description='valid subcommands', 39 dest='subcommand', 40 help='init/start/migrate') 41 subparsers.required = True 42 43 parser_init = subparsers.add_parser('init') 44 parser_init.set_defaults(which='init') 45 46 parser_migrate = subparsers.add_parser('migrate') 47 parser_migrate.set_defaults(which='migrate') 48 49 parser_start = subparsers.add_parser('start') 50 parser_start.add_argument('--reload', 51 action='store_true', 52 help='Restart when code or config changes', 53 required=False, 54 default=False) 55 parser_start.set_defaults(which='start') 56 57 parsed_args = vars(parser.parse_args(args)) 58 59 config_file = parsed_args['ini_file'] 60 61 if parsed_args['which'] == 'init': 62 if os.path.exists(config_file): 63 print("%s already exists." % config_file, file=sys.stderr) 64 return 1 65 66 backend = parsed_args['backend'] 67 if not backend: 68 while True: 69 prompt = ("Select the backend you would like to use: " 70 "(1 - postgresql, 2 - redis, default - memory) ") 71 answer = input(prompt).strip() 72 try: 73 backends = {"1": "postgresql", "2": "redis", "": "memory"} 74 backend = backends[answer] 75 break 76 except KeyError: 77 pass 78 79 init(config_file, backend) 80 81 # Install postgresql libraries if necessary 82 if backend == "postgresql": 83 try: 84 import psycopg2 # NOQA 85 except ImportError: 86 import pip 87 pip.main(['install', "kinto[postgresql]"]) 88 89 elif parsed_args['which'] == 'migrate': 90 env = bootstrap(config_file) 91 scripts.migrate(env) 92 93 elif parsed_args['which'] == 'start': 94 pserve_argv = ['pserve', config_file] 95 if parsed_args['reload']: 96 pserve_argv.append('--reload') 97 pserve.main(pserve_argv) 98 99 return 0 100 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kinto/__main__.py b/kinto/__main__.py --- a/kinto/__main__.py +++ b/kinto/__main__.py @@ -52,6 +52,11 @@ help='Restart when code or config changes', required=False, default=False) + parser_start.add_argument('--port', + type=int, + help='Listening port number', + required=False, + default=8888) parser_start.set_defaults(which='start') parsed_args = vars(parser.parse_args(args)) @@ -94,6 +99,7 @@ pserve_argv = ['pserve', config_file] if parsed_args['reload']: pserve_argv.append('--reload') + pserve_argv.append('http_port=%s' % parsed_args['port']) pserve.main(pserve_argv) return 0
{"golden_diff": "diff --git a/kinto/__main__.py b/kinto/__main__.py\n--- a/kinto/__main__.py\n+++ b/kinto/__main__.py\n@@ -52,6 +52,11 @@\n help='Restart when code or config changes',\n required=False,\n default=False)\n+ parser_start.add_argument('--port',\n+ type=int,\n+ help='Listening port number',\n+ required=False,\n+ default=8888)\n parser_start.set_defaults(which='start')\n \n parsed_args = vars(parser.parse_args(args))\n@@ -94,6 +99,7 @@\n pserve_argv = ['pserve', config_file]\n if parsed_args['reload']:\n pserve_argv.append('--reload')\n+ pserve_argv.append('http_port=%s' % parsed_args['port'])\n pserve.main(pserve_argv)\n \n return 0\n", "issue": "[cliquet] Add a way to change the port used when using kinto start.\noriginal: https://github.com/mozilla-services/cliquet/issues/686\n\nAll in title\n\n", "before_files": [{"content": "from __future__ import print_function\nimport argparse\nimport os\nimport sys\n\nfrom six.moves import input\nfrom kinto.core import scripts\nfrom pyramid.scripts import pserve\nfrom pyramid.paster import bootstrap\nfrom kinto import __version__\nfrom kinto.config import init\n\nCONFIG_FILE = 'config/kinto.ini'\n\n\ndef main(args=None):\n \"\"\"The main routine.\"\"\"\n if args is None:\n args = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=\"Kinto commands\")\n parser.add_argument('--ini',\n help='Application configuration file',\n dest='ini_file',\n required=False,\n default=CONFIG_FILE)\n parser.add_argument('--backend',\n help='Specify backend',\n dest='backend',\n required=False,\n default=None)\n\n parser.add_argument('-v', '--version',\n action='version', version=__version__,\n help='Print the Kinto version and exit.')\n\n subparsers = parser.add_subparsers(title='subcommands',\n description='valid subcommands',\n dest='subcommand',\n help='init/start/migrate')\n subparsers.required = True\n\n parser_init = subparsers.add_parser('init')\n parser_init.set_defaults(which='init')\n\n parser_migrate = subparsers.add_parser('migrate')\n parser_migrate.set_defaults(which='migrate')\n\n parser_start = subparsers.add_parser('start')\n parser_start.add_argument('--reload',\n action='store_true',\n help='Restart when code or config changes',\n required=False,\n default=False)\n parser_start.set_defaults(which='start')\n\n parsed_args = vars(parser.parse_args(args))\n\n config_file = parsed_args['ini_file']\n\n if parsed_args['which'] == 'init':\n if os.path.exists(config_file):\n print(\"%s already exists.\" % config_file, file=sys.stderr)\n return 1\n\n backend = parsed_args['backend']\n if not backend:\n while True:\n prompt = (\"Select the backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, default - memory) \")\n answer = input(prompt).strip()\n try:\n backends = {\"1\": \"postgresql\", \"2\": \"redis\", \"\": \"memory\"}\n backend = backends[answer]\n break\n except KeyError:\n pass\n\n init(config_file, backend)\n\n # Install postgresql libraries if necessary\n if backend == \"postgresql\":\n try:\n import psycopg2 # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"kinto[postgresql]\"])\n\n elif parsed_args['which'] == 'migrate':\n env = bootstrap(config_file)\n scripts.migrate(env)\n\n elif parsed_args['which'] == 'start':\n pserve_argv = ['pserve', config_file]\n if parsed_args['reload']:\n pserve_argv.append('--reload')\n pserve.main(pserve_argv)\n\n return 0\n", "path": "kinto/__main__.py"}], "after_files": [{"content": "from __future__ import print_function\nimport argparse\nimport os\nimport 
sys\n\nfrom six.moves import input\nfrom kinto.core import scripts\nfrom pyramid.scripts import pserve\nfrom pyramid.paster import bootstrap\nfrom kinto import __version__\nfrom kinto.config import init\n\nCONFIG_FILE = 'config/kinto.ini'\n\n\ndef main(args=None):\n \"\"\"The main routine.\"\"\"\n if args is None:\n args = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=\"Kinto commands\")\n parser.add_argument('--ini',\n help='Application configuration file',\n dest='ini_file',\n required=False,\n default=CONFIG_FILE)\n parser.add_argument('--backend',\n help='Specify backend',\n dest='backend',\n required=False,\n default=None)\n\n parser.add_argument('-v', '--version',\n action='version', version=__version__,\n help='Print the Kinto version and exit.')\n\n subparsers = parser.add_subparsers(title='subcommands',\n description='valid subcommands',\n dest='subcommand',\n help='init/start/migrate')\n subparsers.required = True\n\n parser_init = subparsers.add_parser('init')\n parser_init.set_defaults(which='init')\n\n parser_migrate = subparsers.add_parser('migrate')\n parser_migrate.set_defaults(which='migrate')\n\n parser_start = subparsers.add_parser('start')\n parser_start.add_argument('--reload',\n action='store_true',\n help='Restart when code or config changes',\n required=False,\n default=False)\n parser_start.add_argument('--port',\n type=int,\n help='Listening port number',\n required=False,\n default=8888)\n parser_start.set_defaults(which='start')\n\n parsed_args = vars(parser.parse_args(args))\n\n config_file = parsed_args['ini_file']\n\n if parsed_args['which'] == 'init':\n if os.path.exists(config_file):\n print(\"%s already exists.\" % config_file, file=sys.stderr)\n return 1\n\n backend = parsed_args['backend']\n if not backend:\n while True:\n prompt = (\"Select the backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, default - memory) \")\n answer = input(prompt).strip()\n try:\n backends = {\"1\": \"postgresql\", \"2\": \"redis\", \"\": \"memory\"}\n backend = backends[answer]\n break\n except KeyError:\n pass\n\n init(config_file, backend)\n\n # Install postgresql libraries if necessary\n if backend == \"postgresql\":\n try:\n import psycopg2 # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"kinto[postgresql]\"])\n\n elif parsed_args['which'] == 'migrate':\n env = bootstrap(config_file)\n scripts.migrate(env)\n\n elif parsed_args['which'] == 'start':\n pserve_argv = ['pserve', config_file]\n if parsed_args['reload']:\n pserve_argv.append('--reload')\n pserve_argv.append('http_port=%s' % parsed_args['port'])\n pserve.main(pserve_argv)\n\n return 0\n", "path": "kinto/__main__.py"}]}
1128
194
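The kinto golden diff above follows a common argparse pattern: the `start` subcommand gains a `--port` option whose value is appended to the pserve argument list as an `http_port=<n>` setting override. A minimal sketch of that pattern (the config path below is illustrative, not taken from a real deployment):

```python
# Sketch of the --port wiring from the kinto golden diff above; the resulting
# argv list is what would be handed to pyramid.scripts.pserve.main().
import argparse

parser = argparse.ArgumentParser(description="sketch")
subparsers = parser.add_subparsers(dest="which")
parser_start = subparsers.add_parser("start")
parser_start.add_argument("--port", type=int, required=False, default=8888)

args = parser.parse_args(["start", "--port", "9999"])
pserve_argv = ["pserve", "config/kinto.ini", "http_port=%s" % args.port]
print(pserve_argv)
```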
gh_patches_debug_7797
rasdani/github-patches
git_diff
ESMCI__cime-4085
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- CIME mappy scripts_regression_tests in bad shape I'm seeing tons of fails: ``` FAILED (failures=48, errors=7, skipped=21) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/lib/CIME/code_checker.py` Content: ``` 1 """ 2 Libraries for checking python code with pylint 3 """ 4 5 from CIME.XML.standard_module_setup import * 6 7 from CIME.utils import run_cmd, run_cmd_no_fail, expect, get_cime_root, get_src_root, is_python_executable, get_cime_default_driver 8 9 from multiprocessing.dummy import Pool as ThreadPool 10 #pylint: disable=import-error 11 from distutils.spawn import find_executable 12 13 logger = logging.getLogger(__name__) 14 15 ############################################################################### 16 def _run_pylint(on_file, interactive): 17 ############################################################################### 18 pylint = find_executable("pylint") 19 20 cmd_options = " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import" 21 cmd_options += ",fixme,broad-except,bare-except,eval-used,exec-used,global-statement" 22 cmd_options += ",logging-format-interpolation,no-name-in-module" 23 cimeroot = get_cime_root() 24 srcroot = get_src_root() 25 26 if "scripts/Tools" in on_file: 27 cmd_options +=",relative-import" 28 29 # add init-hook option 30 cmd_options += " --init-hook='sys.path.extend((\"%s\",\"%s\",\"%s\",\"%s\"))'"%\ 31 (os.path.join(cimeroot,"scripts","lib"), 32 os.path.join(cimeroot,"scripts","Tools"), 33 os.path.join(cimeroot,"scripts","fortran_unit_testing","python"), 34 os.path.join(srcroot,"components","cmeps","cime_config","runseq")) 35 36 cmd = "%s %s %s" % (pylint, cmd_options, on_file) 37 logger.debug("pylint command is %s"%cmd) 38 stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot) 39 if stat != 0: 40 if interactive: 41 logger.info("File %s has pylint problems, please fix\n Use command: %s" % (on_file, cmd)) 42 logger.info(out + "\n" + err) 43 return (on_file, out + "\n" + err) 44 else: 45 if interactive: 46 logger.info("File %s has no pylint problems" % on_file) 47 return (on_file, "") 48 49 ############################################################################### 50 def _matches(file_path, file_ends): 51 ############################################################################### 52 for file_end in file_ends: 53 if file_path.endswith(file_end): 54 return True 55 56 return False 57 58 ############################################################################### 59 def _should_pylint_skip(filepath): 60 ############################################################################### 61 # TODO - get rid of this 62 list_of_directories_to_ignore = ("xmlconvertors", "pointclm", "point_clm", "tools", "machines", "apidocs", "doc") 63 for dir_to_skip in list_of_directories_to_ignore: 64 if dir_to_skip + "/" in filepath: 65 return True 66 if filepath == "scripts/lib/six.py": 67 return True 68 # intended to be temporary, file needs update 69 if filepath.endswith("archive_metadata") or filepath.endswith("pgn.py"): 70 return True 71 72 return False 73 74 ############################################################################### 75 def get_all_checkable_files(): 76 ############################################################################### 77 cimeroot = get_cime_root() 78 all_git_files = 
run_cmd_no_fail("git ls-files", from_dir=cimeroot, verbose=False).splitlines() 79 if get_cime_default_driver() == "nuopc": 80 srcroot = get_src_root() 81 nuopc_git_files = [] 82 try: 83 nuopc_git_files = run_cmd_no_fail("git ls-files", from_dir=os.path.join(srcroot,"components","cmeps"), verbose=False).splitlines() 84 except: 85 logger.warning("No nuopc driver found in source") 86 all_git_files.extend([os.path.join(srcroot,"components","cmeps",_file) for _file in nuopc_git_files]) 87 files_to_test = [item for item in all_git_files 88 if ((item.endswith(".py") or is_python_executable(os.path.join(cimeroot, item))) and not _should_pylint_skip(item))] 89 90 return files_to_test 91 92 ############################################################################### 93 def check_code(files, num_procs=10, interactive=False): 94 ############################################################################### 95 """ 96 Check all python files in the given directory 97 98 Returns True if all files had no problems 99 """ 100 # Get list of files to check, we look to see if user-provided file argument 101 # is a valid file, if not, we search the repo for a file with similar name. 102 files_to_check = [] 103 if files: 104 repo_files = get_all_checkable_files() 105 for filearg in files: 106 if os.path.exists(filearg): 107 files_to_check.append(os.path.abspath(filearg)) 108 else: 109 found = False 110 for repo_file in repo_files: 111 if repo_file.endswith(filearg): 112 found = True 113 files_to_check.append(repo_file) # could have multiple matches 114 115 if not found: 116 logger.warning("Could not find file matching argument '%s'" % filearg) 117 else: 118 # Check every python file 119 files_to_check = get_all_checkable_files() 120 121 if "scripts/lib/six.py" in files_to_check: 122 files_to_check.remove("scripts/lib/six.py") 123 logger.info("Not checking contributed file six.py") 124 125 expect(len(files_to_check) > 0, "No matching files found") 126 127 # No point in using more threads than files 128 if len(files_to_check) < num_procs: 129 num_procs = len(files_to_check) 130 131 pool = ThreadPool(num_procs) 132 results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check) 133 pool.close() 134 pool.join() 135 return dict(results) 136 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/lib/CIME/code_checker.py b/scripts/lib/CIME/code_checker.py --- a/scripts/lib/CIME/code_checker.py +++ b/scripts/lib/CIME/code_checker.py @@ -19,7 +19,7 @@ cmd_options = " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import" cmd_options += ",fixme,broad-except,bare-except,eval-used,exec-used,global-statement" - cmd_options += ",logging-format-interpolation,no-name-in-module" + cmd_options += ",logging-format-interpolation,no-name-in-module,arguments-renamed" cimeroot = get_cime_root() srcroot = get_src_root()
{"golden_diff": "diff --git a/scripts/lib/CIME/code_checker.py b/scripts/lib/CIME/code_checker.py\n--- a/scripts/lib/CIME/code_checker.py\n+++ b/scripts/lib/CIME/code_checker.py\n@@ -19,7 +19,7 @@\n \n cmd_options = \" --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import\"\n cmd_options += \",fixme,broad-except,bare-except,eval-used,exec-used,global-statement\"\n- cmd_options += \",logging-format-interpolation,no-name-in-module\"\n+ cmd_options += \",logging-format-interpolation,no-name-in-module,arguments-renamed\"\n cimeroot = get_cime_root()\n srcroot = get_src_root()\n", "issue": "CIME mappy scripts_regression_tests in bad shape\nI'm seeing tons of fails:\r\n\r\n```\r\nFAILED (failures=48, errors=7, skipped=21)\r\n```\n", "before_files": [{"content": "\"\"\"\nLibraries for checking python code with pylint\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.utils import run_cmd, run_cmd_no_fail, expect, get_cime_root, get_src_root, is_python_executable, get_cime_default_driver\n\nfrom multiprocessing.dummy import Pool as ThreadPool\n#pylint: disable=import-error\nfrom distutils.spawn import find_executable\n\nlogger = logging.getLogger(__name__)\n\n###############################################################################\ndef _run_pylint(on_file, interactive):\n###############################################################################\n pylint = find_executable(\"pylint\")\n\n cmd_options = \" --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import\"\n cmd_options += \",fixme,broad-except,bare-except,eval-used,exec-used,global-statement\"\n cmd_options += \",logging-format-interpolation,no-name-in-module\"\n cimeroot = get_cime_root()\n srcroot = get_src_root()\n\n if \"scripts/Tools\" in on_file:\n cmd_options +=\",relative-import\"\n\n # add init-hook option\n cmd_options += \" --init-hook='sys.path.extend((\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"))'\"%\\\n (os.path.join(cimeroot,\"scripts\",\"lib\"),\n os.path.join(cimeroot,\"scripts\",\"Tools\"),\n os.path.join(cimeroot,\"scripts\",\"fortran_unit_testing\",\"python\"),\n os.path.join(srcroot,\"components\",\"cmeps\",\"cime_config\",\"runseq\"))\n\n cmd = \"%s %s %s\" % (pylint, cmd_options, on_file)\n logger.debug(\"pylint command is %s\"%cmd)\n stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot)\n if stat != 0:\n if interactive:\n logger.info(\"File %s has pylint problems, please fix\\n Use command: %s\" % (on_file, cmd))\n logger.info(out + \"\\n\" + err)\n return (on_file, out + \"\\n\" + err)\n else:\n if interactive:\n logger.info(\"File %s has no pylint problems\" % on_file)\n return (on_file, \"\")\n\n###############################################################################\ndef _matches(file_path, file_ends):\n###############################################################################\n for file_end in file_ends:\n if file_path.endswith(file_end):\n return True\n\n return False\n\n###############################################################################\ndef _should_pylint_skip(filepath):\n###############################################################################\n # TODO - get rid of this\n list_of_directories_to_ignore = (\"xmlconvertors\", \"pointclm\", \"point_clm\", \"tools\", \"machines\", \"apidocs\", \"doc\")\n for dir_to_skip in list_of_directories_to_ignore:\n if dir_to_skip + \"/\" in filepath:\n return True\n if filepath == \"scripts/lib/six.py\":\n return True\n # intended to be temporary, file needs update\n if 
filepath.endswith(\"archive_metadata\") or filepath.endswith(\"pgn.py\"):\n return True\n\n return False\n\n###############################################################################\ndef get_all_checkable_files():\n###############################################################################\n cimeroot = get_cime_root()\n all_git_files = run_cmd_no_fail(\"git ls-files\", from_dir=cimeroot, verbose=False).splitlines()\n if get_cime_default_driver() == \"nuopc\":\n srcroot = get_src_root()\n nuopc_git_files = []\n try:\n nuopc_git_files = run_cmd_no_fail(\"git ls-files\", from_dir=os.path.join(srcroot,\"components\",\"cmeps\"), verbose=False).splitlines()\n except:\n logger.warning(\"No nuopc driver found in source\")\n all_git_files.extend([os.path.join(srcroot,\"components\",\"cmeps\",_file) for _file in nuopc_git_files])\n files_to_test = [item for item in all_git_files\n if ((item.endswith(\".py\") or is_python_executable(os.path.join(cimeroot, item))) and not _should_pylint_skip(item))]\n\n return files_to_test\n\n###############################################################################\ndef check_code(files, num_procs=10, interactive=False):\n###############################################################################\n \"\"\"\n Check all python files in the given directory\n\n Returns True if all files had no problems\n \"\"\"\n # Get list of files to check, we look to see if user-provided file argument\n # is a valid file, if not, we search the repo for a file with similar name.\n files_to_check = []\n if files:\n repo_files = get_all_checkable_files()\n for filearg in files:\n if os.path.exists(filearg):\n files_to_check.append(os.path.abspath(filearg))\n else:\n found = False\n for repo_file in repo_files:\n if repo_file.endswith(filearg):\n found = True\n files_to_check.append(repo_file) # could have multiple matches\n\n if not found:\n logger.warning(\"Could not find file matching argument '%s'\" % filearg)\n else:\n # Check every python file\n files_to_check = get_all_checkable_files()\n\n if \"scripts/lib/six.py\" in files_to_check:\n files_to_check.remove(\"scripts/lib/six.py\")\n logger.info(\"Not checking contributed file six.py\")\n\n expect(len(files_to_check) > 0, \"No matching files found\")\n\n # No point in using more threads than files\n if len(files_to_check) < num_procs:\n num_procs = len(files_to_check)\n\n pool = ThreadPool(num_procs)\n results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check)\n pool.close()\n pool.join()\n return dict(results)\n", "path": "scripts/lib/CIME/code_checker.py"}], "after_files": [{"content": "\"\"\"\nLibraries for checking python code with pylint\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.utils import run_cmd, run_cmd_no_fail, expect, get_cime_root, get_src_root, is_python_executable, get_cime_default_driver\n\nfrom multiprocessing.dummy import Pool as ThreadPool\n#pylint: disable=import-error\nfrom distutils.spawn import find_executable\n\nlogger = logging.getLogger(__name__)\n\n###############################################################################\ndef _run_pylint(on_file, interactive):\n###############################################################################\n pylint = find_executable(\"pylint\")\n\n cmd_options = \" --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import\"\n cmd_options += \",fixme,broad-except,bare-except,eval-used,exec-used,global-statement\"\n cmd_options += 
\",logging-format-interpolation,no-name-in-module,arguments-renamed\"\n cimeroot = get_cime_root()\n srcroot = get_src_root()\n\n if \"scripts/Tools\" in on_file:\n cmd_options +=\",relative-import\"\n\n # add init-hook option\n cmd_options += \" --init-hook='sys.path.extend((\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"))'\"%\\\n (os.path.join(cimeroot,\"scripts\",\"lib\"),\n os.path.join(cimeroot,\"scripts\",\"Tools\"),\n os.path.join(cimeroot,\"scripts\",\"fortran_unit_testing\",\"python\"),\n os.path.join(srcroot,\"components\",\"cmeps\",\"cime_config\",\"runseq\"))\n\n cmd = \"%s %s %s\" % (pylint, cmd_options, on_file)\n logger.debug(\"pylint command is %s\"%cmd)\n stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot)\n if stat != 0:\n if interactive:\n logger.info(\"File %s has pylint problems, please fix\\n Use command: %s\" % (on_file, cmd))\n logger.info(out + \"\\n\" + err)\n return (on_file, out + \"\\n\" + err)\n else:\n if interactive:\n logger.info(\"File %s has no pylint problems\" % on_file)\n return (on_file, \"\")\n\n###############################################################################\ndef _matches(file_path, file_ends):\n###############################################################################\n for file_end in file_ends:\n if file_path.endswith(file_end):\n return True\n\n return False\n\n###############################################################################\ndef _should_pylint_skip(filepath):\n###############################################################################\n # TODO - get rid of this\n list_of_directories_to_ignore = (\"xmlconvertors\", \"pointclm\", \"point_clm\", \"tools\", \"machines\", \"apidocs\", \"doc\")\n for dir_to_skip in list_of_directories_to_ignore:\n if dir_to_skip + \"/\" in filepath:\n return True\n if filepath == \"scripts/lib/six.py\":\n return True\n # intended to be temporary, file needs update\n if filepath.endswith(\"archive_metadata\") or filepath.endswith(\"pgn.py\"):\n return True\n\n return False\n\n###############################################################################\ndef get_all_checkable_files():\n###############################################################################\n cimeroot = get_cime_root()\n all_git_files = run_cmd_no_fail(\"git ls-files\", from_dir=cimeroot, verbose=False).splitlines()\n if get_cime_default_driver() == \"nuopc\":\n srcroot = get_src_root()\n nuopc_git_files = []\n try:\n nuopc_git_files = run_cmd_no_fail(\"git ls-files\", from_dir=os.path.join(srcroot,\"components\",\"cmeps\"), verbose=False).splitlines()\n except:\n logger.warning(\"No nuopc driver found in source\")\n all_git_files.extend([os.path.join(\"components\",\"cmeps\",_file) for _file in nuopc_git_files])\n files_to_test = [item for item in all_git_files\n if ((item.endswith(\".py\") or is_python_executable(os.path.join(cimeroot, item))) and not _should_pylint_skip(item))]\n\n return files_to_test\n\n###############################################################################\ndef check_code(files, num_procs=10, interactive=False):\n###############################################################################\n \"\"\"\n Check all python files in the given directory\n\n Returns True if all files had no problems\n \"\"\"\n # Get list of files to check, we look to see if user-provided file argument\n # is a valid file, if not, we search the repo for a file with similar name.\n files_to_check = []\n if files:\n repo_files = get_all_checkable_files()\n for filearg in files:\n if 
os.path.exists(filearg):\n files_to_check.append(os.path.abspath(filearg))\n else:\n found = False\n for repo_file in repo_files:\n if repo_file.endswith(filearg):\n found = True\n files_to_check.append(repo_file) # could have multiple matches\n\n if not found:\n logger.warning(\"Could not find file matching argument '%s'\" % filearg)\n else:\n # Check every python file\n files_to_check = get_all_checkable_files()\n\n if \"scripts/lib/six.py\" in files_to_check:\n files_to_check.remove(\"scripts/lib/six.py\")\n logger.info(\"Not checking contributed file six.py\")\n\n expect(len(files_to_check) > 0, \"No matching files found\")\n\n # No point in using more threads than files\n if len(files_to_check) < num_procs:\n num_procs = len(files_to_check)\n\n pool = ThreadPool(num_procs)\n results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check)\n pool.close()\n pool.join()\n return dict(results)\n", "path": "scripts/lib/CIME/code_checker.py"}]}
1,807
158
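
The record above closes with a one-line fix: CIME only appends `arguments-renamed` to the comma-separated `--disable` string it hands to pylint, a check that exists only in newer pylint releases, which is presumably why it had to be silenced after a linter upgrade. A minimal sketch of that invocation pattern, independent of CIME's `_run_pylint` machinery — pylint is assumed to be on `PATH`, and the target filename is a placeholder:

```python
import shutil
import subprocess

# Message names to suppress, mirroring the --disable string built in
# code_checker.py; "arguments-renamed" is the newly appended check.
DISABLED = ",".join([
    "I", "C", "R",
    "logging-not-lazy",
    "wildcard-import",
    "unused-wildcard-import",
    "arguments-renamed",
])

def run_pylint(path):
    """Run pylint on one file and return (returncode, combined output)."""
    pylint = shutil.which("pylint")
    if pylint is None:
        raise RuntimeError("pylint not found on PATH")
    proc = subprocess.run(
        [pylint, f"--disable={DISABLED}", path],
        capture_output=True,
        text=True,
    )
    return proc.returncode, proc.stdout + proc.stderr

if __name__ == "__main__":
    rc, output = run_pylint("some_module.py")  # hypothetical target file
    print(rc)
    print(output)
```
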
gh_patches_debug_11000
rasdani/github-patches
git_diff
sotetsuk__pgx-907
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add `pgx.__version__` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from setuptools import find_packages, setup 2 from pathlib import Path 3 4 long_description = (Path(__file__).parent / "README.md").read_text() 5 6 setup( 7 name="pgx", 8 version="0.7.4", 9 description="GPU/TPU-accelerated parallel game simulators for reinforcement learning (RL)", 10 long_description=long_description, 11 long_description_content_type="text/markdown", 12 url="https://github.com/sotetsuk/pgx", 13 author="Sotetsu KOYAMADA", 14 author_email="sotetsu.koyamada@gmail.com", 15 keywords="", 16 packages=find_packages(), 17 package_data={ 18 "": ["LICENSE", "*.svg", "_src/assets/*.npy", "_mahjong/*.json"] 19 }, 20 include_package_data=True, 21 install_requires=[ 22 "jax>=0.3.25", # JAX version on Colab (TPU) 23 "svgwrite", 24 "typing_extensions", 25 ], 26 classifiers=[ 27 "Programming Language :: Python :: 3.8", 28 "Programming Language :: Python :: 3.9", 29 "Programming Language :: Python :: 3.10", 30 ], 31 ) 32 ``` Path: `pgx/__init__.py` Content: ``` 1 from pgx._src.api_test import v1_api_test 2 from pgx._src.visualizer import ( 3 save_svg, 4 save_svg_animation, 5 set_visualization_config, 6 ) 7 from pgx.v1 import Env, EnvId, State, available_envs, make 8 9 __all__ = [ 10 # v1 api components 11 "State", 12 "Env", 13 "EnvId", 14 "make", 15 "available_envs", 16 # visualization 17 "set_visualization_config", 18 "save_svg", 19 "save_svg_animation", 20 # api tests 21 "v1_api_test", 22 ] 23 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pgx/__init__.py b/pgx/__init__.py --- a/pgx/__init__.py +++ b/pgx/__init__.py @@ -6,6 +6,8 @@ ) from pgx.v1 import Env, EnvId, State, available_envs, make +__version__ = "0.8.0" + __all__ = [ # v1 api components "State", diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -5,7 +5,6 @@ setup( name="pgx", - version="0.7.4", description="GPU/TPU-accelerated parallel game simulators for reinforcement learning (RL)", long_description=long_description, long_description_content_type="text/markdown",
{"golden_diff": "diff --git a/pgx/__init__.py b/pgx/__init__.py\n--- a/pgx/__init__.py\n+++ b/pgx/__init__.py\n@@ -6,6 +6,8 @@\n )\n from pgx.v1 import Env, EnvId, State, available_envs, make\n \n+__version__ = \"0.8.0\"\n+\n __all__ = [\n # v1 api components\n \"State\",\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -5,7 +5,6 @@\n \n setup(\n name=\"pgx\",\n- version=\"0.7.4\",\n description=\"GPU/TPU-accelerated parallel game simulators for reinforcement learning (RL)\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n", "issue": "Add `pgx.__version__`\n\n", "before_files": [{"content": "from setuptools import find_packages, setup\nfrom pathlib import Path\n\nlong_description = (Path(__file__).parent / \"README.md\").read_text()\n\nsetup(\n name=\"pgx\",\n version=\"0.7.4\",\n description=\"GPU/TPU-accelerated parallel game simulators for reinforcement learning (RL)\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/sotetsuk/pgx\",\n author=\"Sotetsu KOYAMADA\",\n author_email=\"sotetsu.koyamada@gmail.com\",\n keywords=\"\",\n packages=find_packages(),\n package_data={\n \"\": [\"LICENSE\", \"*.svg\", \"_src/assets/*.npy\", \"_mahjong/*.json\"]\n },\n include_package_data=True,\n install_requires=[\n \"jax>=0.3.25\", # JAX version on Colab (TPU)\n \"svgwrite\",\n \"typing_extensions\",\n ],\n classifiers=[\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n)\n", "path": "setup.py"}, {"content": "from pgx._src.api_test import v1_api_test\nfrom pgx._src.visualizer import (\n save_svg,\n save_svg_animation,\n set_visualization_config,\n)\nfrom pgx.v1 import Env, EnvId, State, available_envs, make\n\n__all__ = [\n # v1 api components\n \"State\",\n \"Env\",\n \"EnvId\",\n \"make\",\n \"available_envs\",\n # visualization\n \"set_visualization_config\",\n \"save_svg\",\n \"save_svg_animation\",\n # api tests\n \"v1_api_test\",\n]\n", "path": "pgx/__init__.py"}], "after_files": [{"content": "from setuptools import find_packages, setup\nfrom pathlib import Path\n\nlong_description = (Path(__file__).parent / \"README.md\").read_text()\n\nsetup(\n name=\"pgx\",\n description=\"GPU/TPU-accelerated parallel game simulators for reinforcement learning (RL)\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/sotetsuk/pgx\",\n author=\"Sotetsu KOYAMADA\",\n author_email=\"sotetsu.koyamada@gmail.com\",\n keywords=\"\",\n packages=find_packages(),\n package_data={\n \"\": [\"LICENSE\", \"*.svg\", \"_src/assets/*.npy\", \"_mahjong/*.json\"]\n },\n include_package_data=True,\n install_requires=[\n \"jax>=0.3.25\", # JAX version on Colab (TPU)\n \"svgwrite\",\n \"typing_extensions\",\n ],\n classifiers=[\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n)\n", "path": "setup.py"}, {"content": "from pgx._src.api_test import v1_api_test\nfrom pgx._src.visualizer import (\n save_svg,\n save_svg_animation,\n set_visualization_config,\n)\nfrom pgx.v1 import Env, EnvId, State, available_envs, make\n\n__version__ = \"0.8.0\"\n\n__all__ = [\n # v1 api components\n \"State\",\n \"Env\",\n \"EnvId\",\n \"make\",\n \"available_envs\",\n # visualization\n \"set_visualization_config\",\n \"save_svg\",\n \"save_svg_animation\",\n # api tests\n 
\"v1_api_test\",\n]\n", "path": "pgx/__init__.py"}]}
755
182
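
The accepted fix above pins `__version__ = "0.8.0"` as a literal in `pgx/__init__.py` and drops the duplicate `version=` argument from `setup.py`. For contrast, a common alternative keeps a single source of truth by reading the installed distribution's metadata at import time; this is only a sketch of that pattern (stdlib, Python 3.8+), not what the project shipped:

```python
from importlib.metadata import PackageNotFoundError, version

try:
    # "pgx" is the distribution name as recorded by the installer; this
    # avoids maintaining the version string in two places.
    __version__ = version("pgx")
except PackageNotFoundError:
    # Running from an uninstalled source checkout.
    __version__ = "0+unknown"
```

Tools like setuptools-scm take the same idea further by deriving the version from VCS tags and writing it into the distribution metadata at build time.
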
gh_patches_debug_36196
rasdani/github-patches
git_diff
conda__conda-3969
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Invalid requirement while trying to use pip options Hi! I have in my pip section inside envrionment.yaml file this line ```- rep --install-option='--no-deps'``` while I am trying to update my environment I am getting this error ```Invalid requirement: 'rep --install-option='--no-deps''``` if I do pip -r requirements.txt and I have that line as it is in requirements.txt it works. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conda_env/installers/pip.py` Content: ``` 1 from __future__ import absolute_import 2 import subprocess 3 from conda_env.pip_util import pip_args 4 from conda.exceptions import CondaValueError 5 6 7 def install(prefix, specs, args, env, prune=False): 8 pip_cmd = pip_args(prefix) + ['install', ] + specs 9 process = subprocess.Popen(pip_cmd, universal_newlines=True) 10 process.communicate() 11 12 if process.returncode != 0: 13 raise CondaValueError("pip returned an error.") 14 ``` Path: `conda/egg_info.py` Content: ``` 1 """ 2 Functions related to core conda functionality that relates to manually 3 installed Python packages, e.g. using "python setup.py install", or "pip". 4 """ 5 from __future__ import absolute_import, division, print_function, unicode_literals 6 7 from io import open 8 import os 9 from os.path import isdir, isfile, join 10 import re 11 import sys 12 13 from .common.compat import itervalues, on_win 14 from .core.linked_data import linked_data 15 from .misc import rel_path 16 from .models.dist import Dist 17 18 19 def get_site_packages_dir(installed_pkgs): 20 for info in itervalues(installed_pkgs): 21 if info['name'] == 'python': 22 if on_win: 23 stdlib_dir = 'Lib' 24 else: 25 py_ver = info['version'][:3] 26 stdlib_dir = 'lib/python%s' % py_ver 27 return join(stdlib_dir, 'site-packages') 28 return None 29 30 31 def get_egg_info_files(sp_dir): 32 for fn in os.listdir(sp_dir): 33 if not fn.endswith(('.egg', '.egg-info', '.dist-info')): 34 continue 35 path = join(sp_dir, fn) 36 if isfile(path): 37 yield path 38 elif isdir(path): 39 for path2 in [join(path, 'PKG-INFO'), 40 join(path, 'EGG-INFO', 'PKG-INFO'), 41 join(path, 'METADATA')]: 42 if isfile(path2): 43 yield path2 44 45 46 pat = re.compile(r'(\w+):\s*(\S+)', re.I) 47 def parse_egg_info(path): 48 """ 49 Parse an .egg-info file and return its canonical distribution name 50 """ 51 info = {} 52 for line in open(path, encoding='utf-8'): 53 line = line.strip() 54 m = pat.match(line) 55 if m: 56 key = m.group(1).lower() 57 info[key] = m.group(2) 58 try: 59 return '%(name)s-%(version)s-<pip>' % info 60 except KeyError: 61 pass 62 return None 63 64 65 def get_egg_info(prefix, all_pkgs=False): 66 """ 67 Return a set of canonical names of all Python packages (in `prefix`), 68 by inspecting the .egg-info files inside site-packages. 69 By default, only untracked (not conda installed) .egg-info files are 70 considered. Setting `all_pkgs` to True changes this. 
71 """ 72 installed_pkgs = linked_data(prefix) 73 sp_dir = get_site_packages_dir(installed_pkgs) 74 if sp_dir is None: 75 return set() 76 77 conda_files = set() 78 for info in itervalues(installed_pkgs): 79 conda_files.update(info.get('files', [])) 80 81 res = set() 82 for path in get_egg_info_files(join(prefix, sp_dir)): 83 f = rel_path(prefix, path) 84 if all_pkgs or f not in conda_files: 85 try: 86 dist = parse_egg_info(path) 87 except UnicodeDecodeError: 88 dist = None 89 if dist: 90 res.add(Dist(dist)) 91 return res 92 93 94 if __name__ == '__main__': 95 from pprint import pprint 96 pprint(get_egg_info(sys.prefix)) 97 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conda/egg_info.py b/conda/egg_info.py --- a/conda/egg_info.py +++ b/conda/egg_info.py @@ -30,6 +30,10 @@ def get_egg_info_files(sp_dir): for fn in os.listdir(sp_dir): + if fn.endswith('.egg-link'): + with open(join(sp_dir, fn), 'r') as reader: + for egg in get_egg_info_files(reader.readline().strip()): + yield egg if not fn.endswith(('.egg', '.egg-info', '.dist-info')): continue path = join(sp_dir, fn) diff --git a/conda_env/installers/pip.py b/conda_env/installers/pip.py --- a/conda_env/installers/pip.py +++ b/conda_env/installers/pip.py @@ -1,13 +1,56 @@ from __future__ import absolute_import + +import os +import os.path as op import subprocess +import tempfile from conda_env.pip_util import pip_args from conda.exceptions import CondaValueError -def install(prefix, specs, args, env, prune=False): - pip_cmd = pip_args(prefix) + ['install', ] + specs - process = subprocess.Popen(pip_cmd, universal_newlines=True) - process.communicate() +def _pip_install_via_requirements(prefix, specs, args, *_): + """ + Installs the pip dependencies in specs using a temporary pip requirements file. + + Args + ---- + prefix: string + The path to the python and pip executables. + + specs: iterable of strings + Each element should be a valid pip dependency. + See: https://pip.pypa.io/en/stable/user_guide/#requirements-files + https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format + """ + try: + pip_workdir = op.dirname(op.abspath(args.file)) + except AttributeError: + pip_workdir = None + requirements = None + try: + # Generate the temporary requirements file + requirements = tempfile.NamedTemporaryFile(mode='w', + prefix='condaenv.', + suffix='.requirements.txt', + dir=pip_workdir, + delete=False) + requirements.write('\n'.join(specs)) + requirements.close() + # pip command line... + pip_cmd = pip_args(prefix) + ['install', '-r', requirements.name] + # ...run it + process = subprocess.Popen(pip_cmd, + cwd=pip_workdir, + universal_newlines=True) + process.communicate() + if process.returncode != 0: + raise CondaValueError("pip returned an error") + finally: + # Win/Appveyor does not like it if we use context manager + delete=True. + # So we delete the temporary file in a finally block. + if requirements is not None and op.isfile(requirements.name): + os.remove(requirements.name) + - if process.returncode != 0: - raise CondaValueError("pip returned an error.") +# Conform to Installers API +install = _pip_install_via_requirements
{"golden_diff": "diff --git a/conda/egg_info.py b/conda/egg_info.py\n--- a/conda/egg_info.py\n+++ b/conda/egg_info.py\n@@ -30,6 +30,10 @@\n \n def get_egg_info_files(sp_dir):\n for fn in os.listdir(sp_dir):\n+ if fn.endswith('.egg-link'):\n+ with open(join(sp_dir, fn), 'r') as reader:\n+ for egg in get_egg_info_files(reader.readline().strip()):\n+ yield egg\n if not fn.endswith(('.egg', '.egg-info', '.dist-info')):\n continue\n path = join(sp_dir, fn)\ndiff --git a/conda_env/installers/pip.py b/conda_env/installers/pip.py\n--- a/conda_env/installers/pip.py\n+++ b/conda_env/installers/pip.py\n@@ -1,13 +1,56 @@\n from __future__ import absolute_import\n+\n+import os\n+import os.path as op\n import subprocess\n+import tempfile\n from conda_env.pip_util import pip_args\n from conda.exceptions import CondaValueError\n \n \n-def install(prefix, specs, args, env, prune=False):\n- pip_cmd = pip_args(prefix) + ['install', ] + specs\n- process = subprocess.Popen(pip_cmd, universal_newlines=True)\n- process.communicate()\n+def _pip_install_via_requirements(prefix, specs, args, *_):\n+ \"\"\"\n+ Installs the pip dependencies in specs using a temporary pip requirements file.\n+\n+ Args\n+ ----\n+ prefix: string\n+ The path to the python and pip executables.\n+\n+ specs: iterable of strings\n+ Each element should be a valid pip dependency.\n+ See: https://pip.pypa.io/en/stable/user_guide/#requirements-files\n+ https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format\n+ \"\"\"\n+ try:\n+ pip_workdir = op.dirname(op.abspath(args.file))\n+ except AttributeError:\n+ pip_workdir = None\n+ requirements = None\n+ try:\n+ # Generate the temporary requirements file\n+ requirements = tempfile.NamedTemporaryFile(mode='w',\n+ prefix='condaenv.',\n+ suffix='.requirements.txt',\n+ dir=pip_workdir,\n+ delete=False)\n+ requirements.write('\\n'.join(specs))\n+ requirements.close()\n+ # pip command line...\n+ pip_cmd = pip_args(prefix) + ['install', '-r', requirements.name]\n+ # ...run it\n+ process = subprocess.Popen(pip_cmd,\n+ cwd=pip_workdir,\n+ universal_newlines=True)\n+ process.communicate()\n+ if process.returncode != 0:\n+ raise CondaValueError(\"pip returned an error\")\n+ finally:\n+ # Win/Appveyor does not like it if we use context manager + delete=True.\n+ # So we delete the temporary file in a finally block.\n+ if requirements is not None and op.isfile(requirements.name):\n+ os.remove(requirements.name)\n+\n \n- if process.returncode != 0:\n- raise CondaValueError(\"pip returned an error.\")\n+# Conform to Installers API\n+install = _pip_install_via_requirements\n", "issue": "Invalid requirement while trying to use pip options\nHi!\r\n\r\nI have in my pip section inside envrionment.yaml file this line\r\n```- rep --install-option='--no-deps'```\r\n\r\nwhile I am trying to update my environment I am getting this error\r\n\r\n```Invalid requirement: 'rep --install-option='--no-deps''```\r\n\r\nif I do pip -r requirements.txt and I have that line as it is in requirements.txt it works.\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nimport subprocess\nfrom conda_env.pip_util import pip_args\nfrom conda.exceptions import CondaValueError\n\n\ndef install(prefix, specs, args, env, prune=False):\n pip_cmd = pip_args(prefix) + ['install', ] + specs\n process = subprocess.Popen(pip_cmd, universal_newlines=True)\n process.communicate()\n\n if process.returncode != 0:\n raise CondaValueError(\"pip returned an error.\")\n", "path": "conda_env/installers/pip.py"}, 
{"content": "\"\"\"\nFunctions related to core conda functionality that relates to manually\ninstalled Python packages, e.g. using \"python setup.py install\", or \"pip\".\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom io import open\nimport os\nfrom os.path import isdir, isfile, join\nimport re\nimport sys\n\nfrom .common.compat import itervalues, on_win\nfrom .core.linked_data import linked_data\nfrom .misc import rel_path\nfrom .models.dist import Dist\n\n\ndef get_site_packages_dir(installed_pkgs):\n for info in itervalues(installed_pkgs):\n if info['name'] == 'python':\n if on_win:\n stdlib_dir = 'Lib'\n else:\n py_ver = info['version'][:3]\n stdlib_dir = 'lib/python%s' % py_ver\n return join(stdlib_dir, 'site-packages')\n return None\n\n\ndef get_egg_info_files(sp_dir):\n for fn in os.listdir(sp_dir):\n if not fn.endswith(('.egg', '.egg-info', '.dist-info')):\n continue\n path = join(sp_dir, fn)\n if isfile(path):\n yield path\n elif isdir(path):\n for path2 in [join(path, 'PKG-INFO'),\n join(path, 'EGG-INFO', 'PKG-INFO'),\n join(path, 'METADATA')]:\n if isfile(path2):\n yield path2\n\n\npat = re.compile(r'(\\w+):\\s*(\\S+)', re.I)\ndef parse_egg_info(path):\n \"\"\"\n Parse an .egg-info file and return its canonical distribution name\n \"\"\"\n info = {}\n for line in open(path, encoding='utf-8'):\n line = line.strip()\n m = pat.match(line)\n if m:\n key = m.group(1).lower()\n info[key] = m.group(2)\n try:\n return '%(name)s-%(version)s-<pip>' % info\n except KeyError:\n pass\n return None\n\n\ndef get_egg_info(prefix, all_pkgs=False):\n \"\"\"\n Return a set of canonical names of all Python packages (in `prefix`),\n by inspecting the .egg-info files inside site-packages.\n By default, only untracked (not conda installed) .egg-info files are\n considered. 
Setting `all_pkgs` to True changes this.\n \"\"\"\n installed_pkgs = linked_data(prefix)\n sp_dir = get_site_packages_dir(installed_pkgs)\n if sp_dir is None:\n return set()\n\n conda_files = set()\n for info in itervalues(installed_pkgs):\n conda_files.update(info.get('files', []))\n\n res = set()\n for path in get_egg_info_files(join(prefix, sp_dir)):\n f = rel_path(prefix, path)\n if all_pkgs or f not in conda_files:\n try:\n dist = parse_egg_info(path)\n except UnicodeDecodeError:\n dist = None\n if dist:\n res.add(Dist(dist))\n return res\n\n\nif __name__ == '__main__':\n from pprint import pprint\n pprint(get_egg_info(sys.prefix))\n", "path": "conda/egg_info.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport os\nimport os.path as op\nimport subprocess\nimport tempfile\nfrom conda_env.pip_util import pip_args\nfrom conda.exceptions import CondaValueError\n\n\ndef _pip_install_via_requirements(prefix, specs, args, *_):\n \"\"\"\n Installs the pip dependencies in specs using a temporary pip requirements file.\n\n Args\n ----\n prefix: string\n The path to the python and pip executables.\n\n specs: iterable of strings\n Each element should be a valid pip dependency.\n See: https://pip.pypa.io/en/stable/user_guide/#requirements-files\n https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format\n \"\"\"\n try:\n pip_workdir = op.dirname(op.abspath(args.file))\n except AttributeError:\n pip_workdir = None\n requirements = None\n try:\n # Generate the temporary requirements file\n requirements = tempfile.NamedTemporaryFile(mode='w',\n prefix='condaenv.',\n suffix='.requirements.txt',\n dir=pip_workdir,\n delete=False)\n requirements.write('\\n'.join(specs))\n requirements.close()\n # pip command line...\n pip_cmd = pip_args(prefix) + ['install', '-r', requirements.name]\n # ...run it\n process = subprocess.Popen(pip_cmd,\n cwd=pip_workdir,\n universal_newlines=True)\n process.communicate()\n if process.returncode != 0:\n raise CondaValueError(\"pip returned an error\")\n finally:\n # Win/Appveyor does not like it if we use context manager + delete=True.\n # So we delete the temporary file in a finally block.\n if requirements is not None and op.isfile(requirements.name):\n os.remove(requirements.name)\n\n\n# Conform to Installers API\ninstall = _pip_install_via_requirements\n", "path": "conda_env/installers/pip.py"}, {"content": "\"\"\"\nFunctions related to core conda functionality that relates to manually\ninstalled Python packages, e.g. 
using \"python setup.py install\", or \"pip\".\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom io import open\nimport os\nfrom os.path import isdir, isfile, join\nimport re\nimport sys\n\nfrom .common.compat import itervalues, on_win\nfrom .core.linked_data import linked_data\nfrom .misc import rel_path\nfrom .models.dist import Dist\n\n\ndef get_site_packages_dir(installed_pkgs):\n for info in itervalues(installed_pkgs):\n if info['name'] == 'python':\n if on_win:\n stdlib_dir = 'Lib'\n else:\n py_ver = info['version'][:3]\n stdlib_dir = 'lib/python%s' % py_ver\n return join(stdlib_dir, 'site-packages')\n return None\n\n\ndef get_egg_info_files(sp_dir):\n for fn in os.listdir(sp_dir):\n if fn.endswith('.egg-link'):\n with open(join(sp_dir, fn), 'r') as reader:\n for egg in get_egg_info_files(reader.readline().strip()):\n yield egg\n if not fn.endswith(('.egg', '.egg-info', '.dist-info')):\n continue\n path = join(sp_dir, fn)\n if isfile(path):\n yield path\n elif isdir(path):\n for path2 in [join(path, 'PKG-INFO'),\n join(path, 'EGG-INFO', 'PKG-INFO'),\n join(path, 'METADATA')]:\n if isfile(path2):\n yield path2\n\n\npat = re.compile(r'(\\w+):\\s*(\\S+)', re.I)\ndef parse_egg_info(path):\n \"\"\"\n Parse an .egg-info file and return its canonical distribution name\n \"\"\"\n info = {}\n for line in open(path, encoding='utf-8'):\n line = line.strip()\n m = pat.match(line)\n if m:\n key = m.group(1).lower()\n info[key] = m.group(2)\n try:\n return '%(name)s-%(version)s-<pip>' % info\n except KeyError:\n pass\n return None\n\n\ndef get_egg_info(prefix, all_pkgs=False):\n \"\"\"\n Return a set of canonical names of all Python packages (in `prefix`),\n by inspecting the .egg-info files inside site-packages.\n By default, only untracked (not conda installed) .egg-info files are\n considered. Setting `all_pkgs` to True changes this.\n \"\"\"\n installed_pkgs = linked_data(prefix)\n sp_dir = get_site_packages_dir(installed_pkgs)\n if sp_dir is None:\n return set()\n\n conda_files = set()\n for info in itervalues(installed_pkgs):\n conda_files.update(info.get('files', []))\n\n res = set()\n for path in get_egg_info_files(join(prefix, sp_dir)):\n f = rel_path(prefix, path)\n if all_pkgs or f not in conda_files:\n try:\n dist = parse_egg_info(path)\n except UnicodeDecodeError:\n dist = None\n if dist:\n res.add(Dist(dist))\n return res\n\n\nif __name__ == '__main__':\n from pprint import pprint\n pprint(get_egg_info(sys.prefix))\n", "path": "conda/egg_info.py"}]}
1,382
715
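
The core of this fix is a change of transport: instead of splicing each spec into the `pip install` argv, where `rep --install-option='--no-deps'` is parsed as one malformed requirement, the specs are written to a temporary requirements file, whose line format did (at the time of the issue) accept per-requirement options. A standalone sketch of that pattern — `pip_cmd` stands in for however the environment's pip executable is actually located:

```python
import os
import subprocess
import tempfile

def pip_install_specs(specs, pip_cmd=("pip",)):
    """Install dependencies through a temporary pip requirements file."""
    req = tempfile.NamedTemporaryFile(
        mode="w", suffix=".requirements.txt", delete=False
    )
    try:
        # One spec per line; requirements-file syntax permits per-line
        # options such as --install-option, unlike bare CLI arguments.
        req.write("\n".join(specs))
        req.close()  # close first so the pip subprocess can open the file
        subprocess.check_call(list(pip_cmd) + ["install", "-r", req.name])
    finally:
        if os.path.isfile(req.name):
            os.remove(req.name)

# e.g. pip_install_specs(["rep --install-option='--no-deps'"])
```

Note the `delete=False` plus explicit cleanup in `finally` — the same workaround the patch itself uses, since on Windows an open `NamedTemporaryFile` cannot be reopened by the pip subprocess.
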
gh_patches_debug_52514
rasdani/github-patches
git_diff
rucio__rucio-2079
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Upgrade of dependencies for 1.19.0 Modification ------------ # pip-requires SQLAlchemy 1.2.7 --> 1.2.16 alembic 0.9.9 --> 1.0.6 python-dateutil 2.7.2 --> 2.7.5 s3cmd 2.0.1 --> 2.0.2 stomp.py 4.1.20 --> 4.1.21 dnspython 1.15.0 --> 1.16.0 geoip2 2.8.0 --> 2.9.0 maxminddb 1.3.0 --> 1.4.1 pycparser 2.18 --> 2.19 googleapis-common-protos 1.5.3 --> 1.5.6 httplib2 0.11.3 --> 0.12.0 pyOpenSSL 18.0.0 --> 19.0.0 cryptography 2.3.1 --> 2.4.2 oauth2client 4.1.2 --> 4.1.3 protobuf 3.5.2.post1 --> 3.6.1 grpcio 1.11.0 --> 1.18.0 pyasn1 0.4.2 --> 0.4.5 pyasn1-modules 0.2.1 --> 0.2.3 rsa 3.4.2 --> 4.0 redis 2.10.6 --> 3.0.1 numpy 1.14.2 --> 1.16.0 paramiko 2.4.1 --> 2.4.2 Flask 0.12.4 --> 1.0.2 idna 2.6 --> 2.8 MyProxyClient 2.0.1 --> 2.1.0 # pip-requires-test Paste 2.0.3 --> 3.0.6 coverage 4.4.2 --> 4.5.2 Sphinx 1.6.5 --> 1.8.3 sphinx-rtd-theme 0.2.4 --> 0.4.2 sphinxcontrib-httpdomain 1.6.0 --> 1.7.0 Pygments 2.2.0 --> 2.3.1 pyflakes 1.6.0 --> 2.0.0 flake8 3.5.0 --> 3.6.0 pylint 1.7.4 --> 2.2.2 virtualenv 15.1.0 --> 16.2.0 tox 2.9.1 --> 3.7.0 pytest 3.2.5 --> 4.1.1 pytest-xdist 1.20.1 --> 1.26.0 pytz 2017.3 --> 2018.9 Babel 2.5.1 --> 2.6.0 subprocess32 3.2.7 --> 3.5.3 pycodestyle 2.3.1 --> 2.4.0 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py` Content: ``` 1 # Copyright 2013-2019 CERN for the benefit of the ATLAS collaboration. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # 15 # Authors: 16 # - Martin Barisits <martin.barisits@cern.ch>, 2019 17 # 18 # Topic: Replicas table PK definition is in wrong order 19 # Revision ID: 3345511706b8 20 # Revises: 9eb936a81eb1 21 # Creation Date: 2019-01-30 14:20:35.058889 22 23 from alembic.op import (create_primary_key, drop_constraint) 24 25 from alembic import context 26 27 28 # revision identifiers used by alembic 29 revision = '3345511706b8' # pylint: disable=invalid-name 30 down_revision = 'bf3baa1c1474' # pylint: disable=invalid-name 31 32 33 def upgrade(): 34 ''' 35 Upgrade the database to this revision 36 ''' 37 if context.get_context().dialect.name != 'sqlite': 38 drop_constraint('REPLICAS_PK', 'replicas') 39 create_primary_key('REPLICAS_PK', 'replicas', ['scope', 'name', 'rse_id']) 40 41 42 def downgrade(): 43 ''' 44 Downgrade the database to the previous revision 45 ''' 46 if context.get_context().dialect.name != 'sqlite': 47 drop_constraint('REPLICAS_PK', 'replicas') 48 create_primary_key('REPLICAS_PK', 'replicas', ['rse_id', 'scope', 'name']) 49 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py b/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py --- a/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py +++ b/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py @@ -27,7 +27,7 @@ # revision identifiers used by alembic revision = '3345511706b8' # pylint: disable=invalid-name -down_revision = 'bf3baa1c1474' # pylint: disable=invalid-name +down_revision = '01eaf73ab656' # pylint: disable=invalid-name def upgrade():
{"golden_diff": "diff --git a/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py b/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py\n--- a/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py\n+++ b/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py\n@@ -27,7 +27,7 @@\n \n # revision identifiers used by alembic\n revision = '3345511706b8' # pylint: disable=invalid-name\n-down_revision = 'bf3baa1c1474' # pylint: disable=invalid-name\n+down_revision = '01eaf73ab656' # pylint: disable=invalid-name\n \n \n def upgrade():\n", "issue": "Upgrade of dependencies for 1.19.0\nModification\r\n------------\r\n# pip-requires\r\nSQLAlchemy 1.2.7 --> 1.2.16\r\nalembic 0.9.9 --> 1.0.6\r\npython-dateutil 2.7.2 --> 2.7.5\r\ns3cmd 2.0.1 --> 2.0.2\r\nstomp.py 4.1.20 --> 4.1.21\r\ndnspython 1.15.0 --> 1.16.0\r\ngeoip2 2.8.0 --> 2.9.0\r\nmaxminddb 1.3.0 --> 1.4.1\r\npycparser 2.18 --> 2.19\r\ngoogleapis-common-protos 1.5.3 --> 1.5.6\r\nhttplib2 0.11.3 --> 0.12.0\r\npyOpenSSL 18.0.0 --> 19.0.0\r\ncryptography 2.3.1 --> 2.4.2\r\noauth2client 4.1.2 --> 4.1.3\r\nprotobuf 3.5.2.post1 --> 3.6.1\r\ngrpcio 1.11.0 --> 1.18.0\r\npyasn1 0.4.2 --> 0.4.5\r\npyasn1-modules 0.2.1 --> 0.2.3\r\nrsa 3.4.2 --> 4.0\r\nredis 2.10.6 --> 3.0.1\r\nnumpy 1.14.2 --> 1.16.0\r\nparamiko 2.4.1 --> 2.4.2\r\nFlask 0.12.4 --> 1.0.2\r\nidna 2.6 --> 2.8\r\nMyProxyClient 2.0.1 --> 2.1.0\r\n\r\n# pip-requires-test\r\nPaste 2.0.3 --> 3.0.6\r\ncoverage 4.4.2 --> 4.5.2\r\nSphinx 1.6.5 --> 1.8.3\r\nsphinx-rtd-theme 0.2.4 --> 0.4.2\r\nsphinxcontrib-httpdomain 1.6.0 --> 1.7.0\r\nPygments 2.2.0 --> 2.3.1\r\npyflakes 1.6.0 --> 2.0.0\r\nflake8 3.5.0 --> 3.6.0\r\npylint 1.7.4 --> 2.2.2\r\nvirtualenv 15.1.0 --> 16.2.0\r\ntox 2.9.1 --> 3.7.0\r\npytest 3.2.5 --> 4.1.1\r\npytest-xdist 1.20.1 --> 1.26.0\r\npytz 2017.3 --> 2018.9\r\nBabel 2.5.1 --> 2.6.0\r\nsubprocess32 3.2.7 --> 3.5.3\r\npycodestyle 2.3.1 --> 2.4.0\n", "before_files": [{"content": "# Copyright 2013-2019 CERN for the benefit of the ATLAS collaboration.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Martin Barisits <martin.barisits@cern.ch>, 2019\n#\n# Topic: Replicas table PK definition is in wrong order\n# Revision ID: 3345511706b8\n# Revises: 9eb936a81eb1\n# Creation Date: 2019-01-30 14:20:35.058889\n\nfrom alembic.op import (create_primary_key, drop_constraint)\n\nfrom alembic import context\n\n\n# revision identifiers used by alembic\nrevision = '3345511706b8' # pylint: disable=invalid-name\ndown_revision = 'bf3baa1c1474' # pylint: disable=invalid-name\n\n\ndef upgrade():\n '''\n Upgrade the database to this revision\n '''\n if context.get_context().dialect.name != 'sqlite':\n drop_constraint('REPLICAS_PK', 'replicas')\n create_primary_key('REPLICAS_PK', 'replicas', ['scope', 'name', 'rse_id'])\n\n\ndef downgrade():\n '''\n Downgrade the database to the previous revision\n '''\n if 
context.get_context().dialect.name != 'sqlite':\n drop_constraint('REPLICAS_PK', 'replicas')\n create_primary_key('REPLICAS_PK', 'replicas', ['rse_id', 'scope', 'name'])\n", "path": "lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py"}], "after_files": [{"content": "# Copyright 2013-2019 CERN for the benefit of the ATLAS collaboration.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Martin Barisits <martin.barisits@cern.ch>, 2019\n#\n# Topic: Replicas table PK definition is in wrong order\n# Revision ID: 3345511706b8\n# Revises: 9eb936a81eb1\n# Creation Date: 2019-01-30 14:20:35.058889\n\nfrom alembic.op import (create_primary_key, drop_constraint)\n\nfrom alembic import context\n\n\n# revision identifiers used by alembic\nrevision = '3345511706b8' # pylint: disable=invalid-name\ndown_revision = '01eaf73ab656' # pylint: disable=invalid-name\n\n\ndef upgrade():\n '''\n Upgrade the database to this revision\n '''\n if context.get_context().dialect.name != 'sqlite':\n drop_constraint('REPLICAS_PK', 'replicas')\n create_primary_key('REPLICAS_PK', 'replicas', ['scope', 'name', 'rse_id'])\n\n\ndef downgrade():\n '''\n Downgrade the database to the previous revision\n '''\n if context.get_context().dialect.name != 'sqlite':\n drop_constraint('REPLICAS_PK', 'replicas')\n create_primary_key('REPLICAS_PK', 'replicas', ['rse_id', 'scope', 'name'])\n", "path": "lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py"}]}
1,585
248
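
Beyond the dependency bumps enumerated in the issue, the patched file only re-parents the `3345511706b8` migration: alembic orders migrations as a linked list via `revision`/`down_revision`, so a stale parent id leaves two revisions claiming the same ancestor and the history forks. A small self-contained check for that failure mode — the two ids below are the real ones from the diff, and a real check would of course feed in every migration in the repo:

```python
def check_linear_history(revisions):
    """Verify that (revision, down_revision) pairs form one linear chain.

    `revisions` maps revision id -> down_revision id (None for the base).
    Raises ValueError on duplicate parents, i.e. a branched history, which
    is what an out-of-date down_revision like the one patched above causes.
    """
    seen_parents = set()
    for rev, parent in revisions.items():
        if parent in seen_parents:
            raise ValueError(f"history branches at parent {parent!r}")
        seen_parents.add(parent)
    heads = set(revisions) - seen_parents
    if len(heads) != 1:
        raise ValueError(f"expected one head, found {sorted(heads)}")
    return heads.pop()

chain = {"01eaf73ab656": None, "3345511706b8": "01eaf73ab656"}
print(check_linear_history(chain))  # -> 3345511706b8
```
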
gh_patches_debug_65233
rasdani/github-patches
git_diff
streamlink__streamlink-4763
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- plugins.huya: As of today, Huya plugin has been broken ### Checklist - [X] This is a plugin issue and not a different kind of issue - [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink) - [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22) - [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master) ### Streamlink version Latest stable release ### Description When I try to open any public Huya stream I get an error message. Assuming Huya has changed how live IDs are handled and switched to strings. ### Debug log ```text hina@Hinas-MacBook-Pro ~ % streamlink https://www.huya.com/660108 best --loglevel debug [cli][debug] OS: macOS 12.5 [cli][debug] Python: 3.10.6 [cli][debug] Streamlink: 4.3.0 [cli][debug] Dependencies: [cli][debug] isodate: 0.6.1 [cli][debug] lxml: 4.9.1 [cli][debug] pycountry: 22.3.5 [cli][debug] pycryptodome: 3.15.0 [cli][debug] PySocks: 1.7.1 [cli][debug] requests: 2.28.1 [cli][debug] websocket-client: 1.3.3 [cli][debug] Arguments: [cli][debug] url=https://www.huya.com/660108 [cli][debug] stream=['best'] [cli][debug] --loglevel=debug [cli][info] Found matching plugin huya for URL https://www.huya.com/660108 error: Unable to validate response text: ValidationError(NoneOrAllSchema): ValidationError(dict): Unable to validate value of key 'data' Context(AnySchema): ValidationError(dict): Unable to validate value of key 'gameLiveInfo' Context(dict): Unable to validate value of key 'liveId' Context(type): Type of '7134607205476108031' should be int, but is str hina@Hinas-MacBook-Pro ~ % ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/streamlink/plugins/huya.py` Content: ``` 1 """ 2 $description Chinese live-streaming platform for live video game broadcasts and individual live streams. 
3 $url huya.com 4 $type live 5 """ 6 7 import base64 8 import logging 9 import re 10 from html import unescape as html_unescape 11 from typing import Dict 12 13 from streamlink.plugin import Plugin, pluginmatcher 14 from streamlink.plugin.api import validate 15 from streamlink.stream.http import HTTPStream 16 17 log = logging.getLogger(__name__) 18 19 20 @pluginmatcher(re.compile( 21 r"https?://(?:www\.)?huya\.com/(?P<channel>[^/]+)" 22 )) 23 class Huya(Plugin): 24 QUALITY_WEIGHTS: Dict[str, int] = {} 25 26 @classmethod 27 def stream_weight(cls, key): 28 weight = cls.QUALITY_WEIGHTS.get(key) 29 if weight: 30 return weight, "huya" 31 32 return super().stream_weight(key) 33 34 def _get_streams(self): 35 data = self.session.http.get(self.url, schema=validate.Schema( 36 validate.parse_html(), 37 validate.xml_xpath_string(".//script[contains(text(),'var hyPlayerConfig = {')][1]/text()"), 38 validate.none_or_all( 39 re.compile(r"""(?P<q>"?)stream(?P=q)\s*:\s*(?:"(?P<base64>.+?)"|(?P<json>\{.+?})\s*}\s*;)"""), 40 ), 41 validate.none_or_all( 42 validate.any( 43 validate.all( 44 validate.get("base64"), 45 str, 46 validate.transform(base64.b64decode), 47 ), 48 validate.all( 49 validate.get("json"), 50 str, 51 ), 52 ), 53 validate.parse_json(), 54 { 55 "data": [{ 56 "gameLiveInfo": { 57 "liveId": int, 58 "nick": str, 59 "roomName": str, 60 }, 61 "gameStreamInfoList": [validate.all( 62 { 63 "sCdnType": str, 64 "iPCPriorityRate": int, 65 "sStreamName": str, 66 "sFlvUrl": str, 67 "sFlvUrlSuffix": str, 68 "sFlvAntiCode": validate.all(str, validate.transform(lambda v: html_unescape(v))), 69 }, 70 validate.union_get( 71 "sCdnType", 72 "iPCPriorityRate", 73 "sStreamName", 74 "sFlvUrl", 75 "sFlvUrlSuffix", 76 "sFlvAntiCode", 77 )), 78 ], 79 }], 80 }, 81 validate.get(("data", 0)), 82 validate.union_get( 83 ("gameLiveInfo", "liveId"), 84 ("gameLiveInfo", "nick"), 85 ("gameLiveInfo", "roomName"), 86 "gameStreamInfoList", 87 ), 88 ), 89 )) 90 if not data: 91 return 92 93 self.id, self.author, self.title, streamdata = data 94 95 for cdntype, priority, streamname, flvurl, suffix, anticode in streamdata: 96 name = f"source_{cdntype.lower()}" 97 self.QUALITY_WEIGHTS[name] = priority 98 yield name, HTTPStream(self.session, f"{flvurl}/{streamname}.{suffix}?{anticode}") 99 100 log.debug(f"QUALITY_WEIGHTS: {self.QUALITY_WEIGHTS!r}") 101 102 103 __plugin__ = Huya 104 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/streamlink/plugins/huya.py b/src/streamlink/plugins/huya.py --- a/src/streamlink/plugins/huya.py +++ b/src/streamlink/plugins/huya.py @@ -54,7 +54,7 @@ { "data": [{ "gameLiveInfo": { - "liveId": int, + "liveId": str, "nick": str, "roomName": str, },
{"golden_diff": "diff --git a/src/streamlink/plugins/huya.py b/src/streamlink/plugins/huya.py\n--- a/src/streamlink/plugins/huya.py\n+++ b/src/streamlink/plugins/huya.py\n@@ -54,7 +54,7 @@\n {\n \"data\": [{\n \"gameLiveInfo\": {\n- \"liveId\": int,\n+ \"liveId\": str,\n \"nick\": str,\n \"roomName\": str,\n },\n", "issue": "plugins.huya: As of today, Huya plugin has been broken\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest stable release\n\n### Description\n\nWhen I try to open any public Huya stream I get an error message. Assuming Huya has changed how live IDs are handled and switched to strings.\n\n### Debug log\n\n```text\nhina@Hinas-MacBook-Pro ~ % streamlink https://www.huya.com/660108 best --loglevel debug\r\n[cli][debug] OS: macOS 12.5\r\n[cli][debug] Python: 3.10.6\r\n[cli][debug] Streamlink: 4.3.0\r\n[cli][debug] Dependencies:\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.1\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.15.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.28.1\r\n[cli][debug] websocket-client: 1.3.3\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://www.huya.com/660108\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin huya for URL https://www.huya.com/660108\r\nerror: Unable to validate response text: ValidationError(NoneOrAllSchema):\r\n ValidationError(dict):\r\n Unable to validate value of key 'data'\r\n Context(AnySchema):\r\n ValidationError(dict):\r\n Unable to validate value of key 'gameLiveInfo'\r\n Context(dict):\r\n Unable to validate value of key 'liveId'\r\n Context(type):\r\n Type of '7134607205476108031' should be int, but is str\r\nhina@Hinas-MacBook-Pro ~ %\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Chinese live-streaming platform for live video game broadcasts and individual live streams.\n$url huya.com\n$type live\n\"\"\"\n\nimport base64\nimport logging\nimport re\nfrom html import unescape as html_unescape\nfrom typing import Dict\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.http import HTTPStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?huya\\.com/(?P<channel>[^/]+)\"\n))\nclass Huya(Plugin):\n QUALITY_WEIGHTS: Dict[str, int] = {}\n\n @classmethod\n def stream_weight(cls, key):\n weight = cls.QUALITY_WEIGHTS.get(key)\n if weight:\n return weight, \"huya\"\n\n return super().stream_weight(key)\n\n def _get_streams(self):\n data = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//script[contains(text(),'var hyPlayerConfig = {')][1]/text()\"),\n validate.none_or_all(\n re.compile(r\"\"\"(?P<q>\"?)stream(?P=q)\\s*:\\s*(?:\"(?P<base64>.+?)\"|(?P<json>\\{.+?})\\s*}\\s*;)\"\"\"),\n ),\n validate.none_or_all(\n validate.any(\n validate.all(\n validate.get(\"base64\"),\n str,\n validate.transform(base64.b64decode),\n ),\n validate.all(\n 
validate.get(\"json\"),\n str,\n ),\n ),\n validate.parse_json(),\n {\n \"data\": [{\n \"gameLiveInfo\": {\n \"liveId\": int,\n \"nick\": str,\n \"roomName\": str,\n },\n \"gameStreamInfoList\": [validate.all(\n {\n \"sCdnType\": str,\n \"iPCPriorityRate\": int,\n \"sStreamName\": str,\n \"sFlvUrl\": str,\n \"sFlvUrlSuffix\": str,\n \"sFlvAntiCode\": validate.all(str, validate.transform(lambda v: html_unescape(v))),\n },\n validate.union_get(\n \"sCdnType\",\n \"iPCPriorityRate\",\n \"sStreamName\",\n \"sFlvUrl\",\n \"sFlvUrlSuffix\",\n \"sFlvAntiCode\",\n )),\n ],\n }],\n },\n validate.get((\"data\", 0)),\n validate.union_get(\n (\"gameLiveInfo\", \"liveId\"),\n (\"gameLiveInfo\", \"nick\"),\n (\"gameLiveInfo\", \"roomName\"),\n \"gameStreamInfoList\",\n ),\n ),\n ))\n if not data:\n return\n\n self.id, self.author, self.title, streamdata = data\n\n for cdntype, priority, streamname, flvurl, suffix, anticode in streamdata:\n name = f\"source_{cdntype.lower()}\"\n self.QUALITY_WEIGHTS[name] = priority\n yield name, HTTPStream(self.session, f\"{flvurl}/{streamname}.{suffix}?{anticode}\")\n\n log.debug(f\"QUALITY_WEIGHTS: {self.QUALITY_WEIGHTS!r}\")\n\n\n__plugin__ = Huya\n", "path": "src/streamlink/plugins/huya.py"}], "after_files": [{"content": "\"\"\"\n$description Chinese live-streaming platform for live video game broadcasts and individual live streams.\n$url huya.com\n$type live\n\"\"\"\n\nimport base64\nimport logging\nimport re\nfrom html import unescape as html_unescape\nfrom typing import Dict\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.http import HTTPStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?huya\\.com/(?P<channel>[^/]+)\"\n))\nclass Huya(Plugin):\n QUALITY_WEIGHTS: Dict[str, int] = {}\n\n @classmethod\n def stream_weight(cls, key):\n weight = cls.QUALITY_WEIGHTS.get(key)\n if weight:\n return weight, \"huya\"\n\n return super().stream_weight(key)\n\n def _get_streams(self):\n data = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//script[contains(text(),'var hyPlayerConfig = {')][1]/text()\"),\n validate.none_or_all(\n re.compile(r\"\"\"(?P<q>\"?)stream(?P=q)\\s*:\\s*(?:\"(?P<base64>.+?)\"|(?P<json>\\{.+?})\\s*}\\s*;)\"\"\"),\n ),\n validate.none_or_all(\n validate.any(\n validate.all(\n validate.get(\"base64\"),\n str,\n validate.transform(base64.b64decode),\n ),\n validate.all(\n validate.get(\"json\"),\n str,\n ),\n ),\n validate.parse_json(),\n {\n \"data\": [{\n \"gameLiveInfo\": {\n \"liveId\": str,\n \"nick\": str,\n \"roomName\": str,\n },\n \"gameStreamInfoList\": [validate.all(\n {\n \"sCdnType\": str,\n \"iPCPriorityRate\": int,\n \"sStreamName\": str,\n \"sFlvUrl\": str,\n \"sFlvUrlSuffix\": str,\n \"sFlvAntiCode\": validate.all(str, validate.transform(lambda v: html_unescape(v))),\n },\n validate.union_get(\n \"sCdnType\",\n \"iPCPriorityRate\",\n \"sStreamName\",\n \"sFlvUrl\",\n \"sFlvUrlSuffix\",\n \"sFlvAntiCode\",\n )),\n ],\n }],\n },\n validate.get((\"data\", 0)),\n validate.union_get(\n (\"gameLiveInfo\", \"liveId\"),\n (\"gameLiveInfo\", \"nick\"),\n (\"gameLiveInfo\", \"roomName\"),\n \"gameStreamInfoList\",\n ),\n ),\n ))\n if not data:\n return\n\n self.id, self.author, self.title, streamdata = data\n\n for cdntype, priority, streamname, flvurl, suffix, anticode in streamdata:\n name = f\"source_{cdntype.lower()}\"\n self.QUALITY_WEIGHTS[name] = 
priority\n yield name, HTTPStream(self.session, f\"{flvurl}/{streamname}.{suffix}?{anticode}\")\n\n log.debug(f\"QUALITY_WEIGHTS: {self.QUALITY_WEIGHTS!r}\")\n\n\n__plugin__ = Huya\n", "path": "src/streamlink/plugins/huya.py"}]}
1,772
97
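
The one-character change above (`int` → `str` for `liveId`) is all the plugin needed once Huya started serialising live ids as strings, exactly as the validation error in the debug log shows. Using the same `streamlink.plugin.api.validate` helpers that already appear in the plugin, a more defensive schema could accept either type and normalise it — a sketch assuming streamlink is installed, not the fix that actually shipped:

```python
from streamlink.plugin.api import validate

# Accept either JSON representation and normalise to str, so another
# upstream type flip would not break validation a second time.
live_id = validate.all(
    validate.any(int, str),
    validate.transform(str),
)

schema = validate.Schema({"gameLiveInfo": {"liveId": live_id}})
print(schema.validate({"gameLiveInfo": {"liveId": "7134607205476108031"}}))
```
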
gh_patches_debug_1132
rasdani/github-patches
git_diff
scikit-hep__pyhf-1790
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Guard SCHEMA_VERSION from version bumps I don't think it is going to be possible to guard the `SCHEMA_VERSION` from `bump2version` so we might need to look for a replacement for `bump2version` that gives guard support. This is going to be a problem when https://github.com/scikit-hep/pyhf/blob/6b0a9317b14da2a452f51d089cb9e493c8f19347/.bumpversion.cfg#L1-L2 hits `1.0.0` and conflicts with https://github.com/scikit-hep/pyhf/blob/f824afe77d9e48e90651931700ccfc3d3c268c18/src/pyhf/utils.py#L13 and also has to properly pick up the multiple correct instances in https://github.com/scikit-hep/pyhf/blob/f824afe77d9e48e90651931700ccfc3d3c268c18/src/pyhf/utils.py#L145 _Originally posted by @matthewfeickert in https://github.com/scikit-hep/pyhf/issues/1218#issuecomment-744590434_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from setuptools import setup 2 3 extras_require = { 4 'shellcomplete': ['click_completion'], 5 'tensorflow': [ 6 'tensorflow>=2.3.1', # c.f. https://github.com/tensorflow/tensorflow/pull/40789 7 'tensorflow-probability>=0.11.0', # c.f. PR #1657 8 ], 9 'torch': ['torch>=1.10.0'], # c.f. PR #1657 10 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.60,!=0.1.68'], # c.f. Issue 1501 11 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567 12 'minuit': ['iminuit>=2.4.0'], # c.f. PR #1306 13 } 14 extras_require['backends'] = sorted( 15 set( 16 extras_require['tensorflow'] 17 + extras_require['torch'] 18 + extras_require['jax'] 19 + extras_require['minuit'] 20 ) 21 ) 22 extras_require['contrib'] = sorted({'matplotlib', 'requests'}) 23 extras_require['lint'] = sorted({'flake8', 'black>=22.1.0'}) 24 25 extras_require['test'] = sorted( 26 set( 27 extras_require['backends'] 28 + extras_require['xmlio'] 29 + extras_require['contrib'] 30 + extras_require['shellcomplete'] 31 + [ 32 'scikit-hep-testdata>=0.4.11', 33 'pytest>=6.0', 34 'pytest-cov>=2.5.1', 35 'pytest-mock', 36 'requests-mock>=1.9.0', 37 'pytest-benchmark[histogram]', 38 'pytest-console-scripts', 39 'pytest-mpl', 40 'pydocstyle', 41 'papermill~=2.0', 42 'nteract-scrapbook~=0.2', 43 'jupyter', 44 'graphviz', 45 ] 46 ) 47 ) 48 extras_require['docs'] = sorted( 49 set( 50 extras_require['xmlio'] 51 + extras_require['contrib'] 52 + [ 53 'sphinx>=4.0.0', 54 'sphinxcontrib-bibtex~=2.1', 55 'sphinx-click', 56 'sphinx_rtd_theme', 57 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620 58 'ipywidgets', 59 'sphinx-issues', 60 'sphinx-copybutton>=0.3.2', 61 ] 62 ) 63 ) 64 extras_require['develop'] = sorted( 65 set( 66 extras_require['docs'] 67 + extras_require['lint'] 68 + extras_require['test'] 69 + [ 70 'nbdime', 71 'bump2version', 72 'ipython', 73 'pre-commit', 74 'check-manifest', 75 'codemetapy>=0.3.4', 76 'twine', 77 ] 78 ) 79 ) 80 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 81 82 83 setup( 84 extras_require=extras_require, 85 use_scm_version=lambda: {'local_scheme': lambda version: ''}, 86 ) 87 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
 
 
 if __name__ == "__main__":
-    asyncio.run(run_async_server("."), debug=True)
+    asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
 
 
 if __name__ == "__main__":
-    server = run_sync_server(".")
+    server = run_sync_server()
     server.shutdown()
```
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -68,7 +68,7 @@
         + extras_require['test']
         + [
             'nbdime',
-            'bump2version',
+            'tbump>=6.7.0',
             'ipython',
             'pre-commit',
             'check-manifest',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -68,7 +68,7 @@\n + extras_require['test']\n + [\n 'nbdime',\n- 'bump2version',\n+ 'tbump>=6.7.0',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n", "issue": "Guard SCHEMA_VERSION from version bumps\nI don't think it is going to be possible to guard the `SCHEMA_VERSION` from `bump2version` so we might need to look for a replacement for `bump2version` that gives guard support.\r\n\r\nThis is going to be a problem when \r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/6b0a9317b14da2a452f51d089cb9e493c8f19347/.bumpversion.cfg#L1-L2\r\n\r\nhits `1.0.0` and conflicts with \r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/f824afe77d9e48e90651931700ccfc3d3c268c18/src/pyhf/utils.py#L13\r\n\r\nand also has to properly pick up the multiple correct instances in \r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/f824afe77d9e48e90651931700ccfc3d3c268c18/src/pyhf/utils.py#L145\r\n\r\n_Originally posted by @matthewfeickert in https://github.com/scikit-hep/pyhf/issues/1218#issuecomment-744590434_\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow>=2.3.1', # c.f. https://github.com/tensorflow/tensorflow/pull/40789\n 'tensorflow-probability>=0.11.0', # c.f. PR #1657\n ],\n 'torch': ['torch>=1.10.0'], # c.f. PR #1657\n 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.60,!=0.1.68'], # c.f. Issue 1501\n 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567\n 'minuit': ['iminuit>=2.4.0'], # c.f. PR #1306\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black>=22.1.0'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'scikit-hep-testdata>=0.4.11',\n 'pytest>=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'requests-mock>=1.9.0',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'sphinx>=4.0.0',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>=0.3.2',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'bump2version',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow>=2.3.1', # c.f. https://github.com/tensorflow/tensorflow/pull/40789\n 'tensorflow-probability>=0.11.0', # c.f. PR #1657\n ],\n 'torch': ['torch>=1.10.0'], # c.f. PR #1657\n 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.60,!=0.1.68'], # c.f. 
Issue 1501\n 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567\n 'minuit': ['iminuit>=2.4.0'], # c.f. PR #1306\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black>=22.1.0'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'scikit-hep-testdata>=0.4.11',\n 'pytest>=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'requests-mock>=1.9.0',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'sphinx>=4.0.0',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>=0.3.2',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'tbump>=6.7.0',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
1,436
84
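A side note on the record above: the clash the issue warns about is easy to reproduce once the package version string equals `SCHEMA_VERSION`. The sketch below is illustrative only, with assumed file contents that are not taken from the pyhf sources; the golden diff's switch to `tbump` presumably avoids this via per-file search patterns.

```python
# Hypothetical sketch of the hazard described in the issue (assumed contents).
source = "__version__ = '1.0.0'\nSCHEMA_VERSION = '1.0.0'\n"

# A naive, unguarded bump from 1.0.0 to 1.0.1 rewrites every occurrence,
# including the JSON schema version that must stay pinned:
print(source.replace("1.0.0", "1.0.1"))
# __version__ = '1.0.1'
# SCHEMA_VERSION = '1.0.1'   <- clobbered
```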
gh_patches_debug_2914
rasdani/github-patches
git_diff
gratipay__gratipay.com-2792
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- broken facebook link when no user_name If all we have is a user_id, we construct the URL improperly. In that case we need: `http://facebook.com/profile.php?id=$ID` But we have: `http://facebook.com/None` broken facebook link when no user_name If all we have is a user_id, we construct the URL improperly. In that case we need: `http://facebook.com/profile.php?id=$ID` But we have: `http://facebook.com/None` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gratipay/elsewhere/facebook.py` Content: ``` 1 from __future__ import absolute_import, division, print_function, unicode_literals 2 3 from gratipay.elsewhere import PlatformOAuth2 4 from gratipay.elsewhere._extractors import key 5 6 7 class Facebook(PlatformOAuth2): 8 9 # Platform attributes 10 name = 'facebook' 11 display_name = 'Facebook' 12 account_url = 'https://www.facebook.com/{user_name}' 13 14 # Auth attributes 15 auth_url = 'https://www.facebook.com/dialog/oauth' 16 access_token_url = 'https://graph.facebook.com/oauth/access_token' 17 oauth_default_scope = ['public_profile,email'] 18 19 # API attributes 20 api_format = 'json' 21 api_url = 'https://graph.facebook.com' 22 api_user_info_path = '/{user_name}' 23 api_user_self_info_path = '/me' 24 25 # User info extractors 26 x_user_id = key('id') 27 x_user_name = key('username') 28 x_display_name = key('name') 29 x_email = key('email') 30 31 def x_avatar_url(self, extracted, info, default): 32 return 'https://graph.facebook.com/' + extracted.user_id + '/picture?width=256&height=256' 33 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gratipay/elsewhere/facebook.py b/gratipay/elsewhere/facebook.py
--- a/gratipay/elsewhere/facebook.py
+++ b/gratipay/elsewhere/facebook.py
@@ -9,7 +9,7 @@
     # Platform attributes
     name = 'facebook'
     display_name = 'Facebook'
-    account_url = 'https://www.facebook.com/{user_name}'
+    account_url = 'https://www.facebook.com/profile.php?id={user_id}'
 
     # Auth attributes
     auth_url = 'https://www.facebook.com/dialog/oauth'
{"golden_diff": "diff --git a/gratipay/elsewhere/facebook.py b/gratipay/elsewhere/facebook.py\n--- a/gratipay/elsewhere/facebook.py\n+++ b/gratipay/elsewhere/facebook.py\n@@ -9,7 +9,7 @@\n # Platform attributes\n name = 'facebook'\n display_name = 'Facebook'\n- account_url = 'https://www.facebook.com/{user_name}'\n+ account_url = 'https://www.facebook.com/profile.php?id={user_id}'\n \n # Auth attributes\n auth_url = 'https://www.facebook.com/dialog/oauth'\n", "issue": "broken facebook link when no user_name\nIf all we have is a user_id, we construct the URL improperly. In that case we need:\n\n`http://facebook.com/profile.php?id=$ID`\n\nBut we have:\n\n`http://facebook.com/None`\n\nbroken facebook link when no user_name\nIf all we have is a user_id, we construct the URL improperly. In that case we need:\n\n`http://facebook.com/profile.php?id=$ID`\n\nBut we have:\n\n`http://facebook.com/None`\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom gratipay.elsewhere import PlatformOAuth2\nfrom gratipay.elsewhere._extractors import key\n\n\nclass Facebook(PlatformOAuth2):\n\n # Platform attributes\n name = 'facebook'\n display_name = 'Facebook'\n account_url = 'https://www.facebook.com/{user_name}'\n\n # Auth attributes\n auth_url = 'https://www.facebook.com/dialog/oauth'\n access_token_url = 'https://graph.facebook.com/oauth/access_token'\n oauth_default_scope = ['public_profile,email']\n\n # API attributes\n api_format = 'json'\n api_url = 'https://graph.facebook.com'\n api_user_info_path = '/{user_name}'\n api_user_self_info_path = '/me'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('username')\n x_display_name = key('name')\n x_email = key('email')\n\n def x_avatar_url(self, extracted, info, default):\n return 'https://graph.facebook.com/' + extracted.user_id + '/picture?width=256&height=256'\n", "path": "gratipay/elsewhere/facebook.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom gratipay.elsewhere import PlatformOAuth2\nfrom gratipay.elsewhere._extractors import key\n\n\nclass Facebook(PlatformOAuth2):\n\n # Platform attributes\n name = 'facebook'\n display_name = 'Facebook'\n account_url = 'https://www.facebook.com/profile.php?id={user_id}'\n\n # Auth attributes\n auth_url = 'https://www.facebook.com/dialog/oauth'\n access_token_url = 'https://graph.facebook.com/oauth/access_token'\n oauth_default_scope = ['public_profile,email']\n\n # API attributes\n api_format = 'json'\n api_url = 'https://graph.facebook.com'\n api_user_info_path = '/{user_name}'\n api_user_self_info_path = '/me'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('username')\n x_display_name = key('name')\n x_email = key('email')\n\n def x_avatar_url(self, extracted, info, default):\n return 'https://graph.facebook.com/' + extracted.user_id + '/picture?width=256&height=256'\n", "path": "gratipay/elsewhere/facebook.py"}]}
681
124
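The effect of the one-line fix above can be checked with a toy formatting exercise; the values are assumed for illustration.

```python
# Toy reproduction (assumed values) of the broken link from the issue.
old_template = 'https://www.facebook.com/{user_name}'
new_template = 'https://www.facebook.com/profile.php?id={user_id}'

print(old_template.format(user_name=None))    # https://www.facebook.com/None
print(new_template.format(user_id='12345'))   # https://www.facebook.com/profile.php?id=12345
```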
gh_patches_debug_6473
rasdani/github-patches
git_diff
dotkom__onlineweb4-2102
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Updating "My profile" does not save all profile fields ## What kind of an issue is this? - [x] Bug report - [ ] Feature request ## What is the expected behaviour? Seeing my post number at my [profile overview page](https://online.ntnu.no/profile/overview/) ## What is the current behaviour? Seeing `None Ugyldig postnummer` at my [profile overview page](https://online.ntnu.no/profile/overview/) <!-- if this is a bug report --> ## How do you reproduce this problem? - Go to the [profile edit page](https://online.ntnu.no/profile/edit/), write `7050` (or another post number) into the _Postnummer_ field - Press _Lagre brukerinformasjon_. No visible errors on page or in browser console, green status box at the top says _"Brukerprofilen din ble endret"_ - Go to the [profile overview page](https://online.ntnu.no/profile/overview/), look for the _Adresse_. You should now see `None Ugyldig postnummer`. <!-- if this is a bug report --> <!-- provide steps to reproduce this problem, preferably in a bullet point list --> ## Other information <!-- include screenshots if appropriate --> <!-- add labels you see fit. please do not exaggerate. --> ### Screenshots [Profile edit page:](https://online.ntnu.no/profile/edit/) ![image](https://user-images.githubusercontent.com/12127271/32372177-150971b4-c094-11e7-8a05-01eee0603e30.png) [Profile overview page:](https://online.ntnu.no/profile/overview/) ![image](https://user-images.githubusercontent.com/12127271/32372269-6b2cfdea-c094-11e7-9b39-a459a86451dd.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `apps/profiles/forms.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import re 3 4 from django import forms 5 from django.contrib import auth 6 from django.utils.translation import ugettext as _ 7 8 from apps.authentication.models import OnlineUser, Position 9 from apps.profiles.models import Privacy 10 11 ZIP_CODE_VALIDATION_ERROR = "Postnummer må bestå av fire siffer." 
12 13 14 class ProfileForm(forms.ModelForm): 15 class Meta(object): 16 model = OnlineUser 17 18 fields = [ 19 'nickname', 20 'website', 21 'phone_number', 22 'address', 23 'zip_code', 24 'allergies', 25 'compiled', 26 'bio', 27 'gender', 28 'github', 29 'linkedin' 30 ] 31 widgets = { 32 'allergies': forms.Textarea(attrs={'id': 'allergies'}), 33 'gender': forms.Select(attrs={'class': 'form-control', 'id': 'gender'}), 34 'bio': forms.Textarea(attrs={'id': 'bio'}), 35 'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}), 36 } 37 38 def clean_zip_code(self): 39 zip_code = self.cleaned_data['zip_code'] 40 if zip_code and len(zip_code) != 0 and not re.match(r'\d{4}', zip_code): 41 self.add_error('zip_code', ZIP_CODE_VALIDATION_ERROR) 42 43 44 class PrivacyForm(forms.ModelForm): 45 class Meta(object): 46 model = Privacy 47 exclude = ['user', 'expose_nickname'] 48 49 50 class MailSettingsForm(forms.ModelForm): 51 class Meta(object): 52 model = OnlineUser 53 fields = ['infomail', ] 54 55 56 class PositionForm(forms.ModelForm): 57 class Meta(object): 58 model = Position 59 exclude = ['user'] 60 widgets = { 61 'committee': forms.Select(attrs={'class': 'form-control'}), 62 'position': forms.Select(attrs={'class': 'form-control'}), 63 } 64 65 def clean(self): 66 super(PositionForm, self).clean() 67 68 range_compiler = re.compile(r'\d{4}-\d{4}') 69 year_range = self.cleaned_data['period'] 70 71 # If it doesn't match the format YYYY-YYYY 72 if not range_compiler.match(year_range): 73 self._errors['period'] = self.error_class( 74 [_('Feil format. Dobbelsjekk at input er på formatet YYYY-YYYY.')] 75 ) 76 return self.cleaned_data 77 78 years = year_range.split('-') 79 80 # If somewhat they fucked up input, we don't want None-shit after the split. 81 if not years[0] or not years[1]: 82 self._errors['period'] = self.error_class([_('Feil format. Dobbelsjekk input.')]) 83 return self.cleaned_data 84 85 # If first year is larger than latter, or the diff is more than one, fail. 86 if (int(years[0]) > int(years[1])) or (int(years[1]) - int(years[0])) > 1: 87 self._errors['period'] = self.error_class([_('Ikke gyldig års-intervall. Bare ett år er tillat.')]) 88 89 return self.cleaned_data 90 91 92 class MembershipSettingsForm(forms.ModelForm): 93 def __init__(self, *args, **kwargs): 94 super(MembershipSettingsForm, self).__init__(*args, **kwargs) 95 self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker' 96 97 class Meta(object): 98 model = OnlineUser 99 fields = ['field_of_study', 'started_date'] 100 101 widgets = { 102 'started_date': forms.TextInput(attrs={'placeholder': 'YYYY-MM-DD'}), 103 } 104 105 106 class InternalServicesForm(forms.Form): 107 ow4_password = forms.CharField(widget=forms.PasswordInput(), label=_(u"Online passord")) 108 services_password = forms.CharField(widget=forms.PasswordInput(), label=_(u"Ønsket service passord")) 109 current_user = None 110 111 def clean(self): 112 super(InternalServicesForm, self).clean() 113 if self.is_valid(): 114 cleaned_data = self.cleaned_data 115 116 # User object relation here 117 user = auth.authenticate(username=self.current_user.username, password=cleaned_data['ow4_password']) 118 119 if user is None or user.id != self.current_user.id: 120 self._errors['ow4_password'] = self.error_class([_(u"Passordet er ikke korrekt.")]) 121 122 return cleaned_data 123 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
 
 
 if __name__ == "__main__":
-    asyncio.run(run_async_server("."), debug=True)
+    asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
 
 
 if __name__ == "__main__":
-    server = run_sync_server(".")
+    server = run_sync_server()
     server.shutdown()
```
diff --git a/apps/profiles/forms.py b/apps/profiles/forms.py
--- a/apps/profiles/forms.py
+++ b/apps/profiles/forms.py
@@ -37,9 +37,11 @@
 
     def clean_zip_code(self):
         zip_code = self.cleaned_data['zip_code']
-        if zip_code and len(zip_code) != 0 and not re.match(r'\d{4}', zip_code):
+        if not zip_code or not re.match(r'^\d{4}$', zip_code):
             self.add_error('zip_code', ZIP_CODE_VALIDATION_ERROR)
 
+        return zip_code
+
 
 class PrivacyForm(forms.ModelForm):
     class Meta(object):
{"golden_diff": "diff --git a/apps/profiles/forms.py b/apps/profiles/forms.py\n--- a/apps/profiles/forms.py\n+++ b/apps/profiles/forms.py\n@@ -37,9 +37,11 @@\n \n def clean_zip_code(self):\n zip_code = self.cleaned_data['zip_code']\n- if zip_code and len(zip_code) != 0 and not re.match(r'\\d{4}', zip_code):\n+ if not zip_code or not re.match(r'^\\d{4}$', zip_code):\n self.add_error('zip_code', ZIP_CODE_VALIDATION_ERROR)\n \n+ return zip_code\n+\n \n class PrivacyForm(forms.ModelForm):\n class Meta(object):\n", "issue": "Updating \"My profile\" does not save all profile fields\n## What kind of an issue is this?\r\n\r\n- [x] Bug report\r\n- [ ] Feature request\r\n\r\n\r\n## What is the expected behaviour?\r\nSeeing my post number at my [profile overview page](https://online.ntnu.no/profile/overview/)\r\n\r\n\r\n## What is the current behaviour?\r\nSeeing `None Ugyldig postnummer` at my [profile overview page](https://online.ntnu.no/profile/overview/)\r\n\r\n<!-- if this is a bug report -->\r\n\r\n\r\n## How do you reproduce this problem? \r\n\r\n- Go to the [profile edit page](https://online.ntnu.no/profile/edit/), write `7050` (or another post number) into the _Postnummer_ field\r\n- Press _Lagre brukerinformasjon_. No visible errors on page or in browser console, green status box at the top says _\"Brukerprofilen din ble endret\"_\r\n- Go to the [profile overview page](https://online.ntnu.no/profile/overview/), look for the _Adresse_. You should now see `None Ugyldig postnummer`.\r\n\r\n<!-- if this is a bug report -->\r\n<!-- provide steps to reproduce this problem, preferably in a bullet point list -->\r\n\r\n\r\n## Other information\r\n\r\n<!-- include screenshots if appropriate -->\r\n<!-- add labels you see fit. please do not exaggerate. -->\r\n### Screenshots\r\n[Profile edit page:](https://online.ntnu.no/profile/edit/)\r\n![image](https://user-images.githubusercontent.com/12127271/32372177-150971b4-c094-11e7-8a05-01eee0603e30.png)\r\n[Profile overview page:](https://online.ntnu.no/profile/overview/)\r\n![image](https://user-images.githubusercontent.com/12127271/32372269-6b2cfdea-c094-11e7-9b39-a459a86451dd.png)\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport re\n\nfrom django import forms\nfrom django.contrib import auth\nfrom django.utils.translation import ugettext as _\n\nfrom apps.authentication.models import OnlineUser, Position\nfrom apps.profiles.models import Privacy\n\nZIP_CODE_VALIDATION_ERROR = \"Postnummer m\u00e5 best\u00e5 av fire siffer.\"\n\n\nclass ProfileForm(forms.ModelForm):\n class Meta(object):\n model = OnlineUser\n\n fields = [\n 'nickname',\n 'website',\n 'phone_number',\n 'address',\n 'zip_code',\n 'allergies',\n 'compiled',\n 'bio',\n 'gender',\n 'github',\n 'linkedin'\n ]\n widgets = {\n 'allergies': forms.Textarea(attrs={'id': 'allergies'}),\n 'gender': forms.Select(attrs={'class': 'form-control', 'id': 'gender'}),\n 'bio': forms.Textarea(attrs={'id': 'bio'}),\n 'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}),\n }\n\n def clean_zip_code(self):\n zip_code = self.cleaned_data['zip_code']\n if zip_code and len(zip_code) != 0 and not re.match(r'\\d{4}', zip_code):\n self.add_error('zip_code', ZIP_CODE_VALIDATION_ERROR)\n\n\nclass PrivacyForm(forms.ModelForm):\n class Meta(object):\n model = Privacy\n exclude = ['user', 'expose_nickname']\n\n\nclass MailSettingsForm(forms.ModelForm):\n class Meta(object):\n model = OnlineUser\n fields = ['infomail', ]\n\n\nclass PositionForm(forms.ModelForm):\n class Meta(object):\n model = 
Position\n exclude = ['user']\n widgets = {\n 'committee': forms.Select(attrs={'class': 'form-control'}),\n 'position': forms.Select(attrs={'class': 'form-control'}),\n }\n\n def clean(self):\n super(PositionForm, self).clean()\n\n range_compiler = re.compile(r'\\d{4}-\\d{4}')\n year_range = self.cleaned_data['period']\n\n # If it doesn't match the format YYYY-YYYY\n if not range_compiler.match(year_range):\n self._errors['period'] = self.error_class(\n [_('Feil format. Dobbelsjekk at input er p\u00e5 formatet YYYY-YYYY.')]\n )\n return self.cleaned_data\n\n years = year_range.split('-')\n\n # If somewhat they fucked up input, we don't want None-shit after the split.\n if not years[0] or not years[1]:\n self._errors['period'] = self.error_class([_('Feil format. Dobbelsjekk input.')])\n return self.cleaned_data\n\n # If first year is larger than latter, or the diff is more than one, fail.\n if (int(years[0]) > int(years[1])) or (int(years[1]) - int(years[0])) > 1:\n self._errors['period'] = self.error_class([_('Ikke gyldig \u00e5rs-intervall. Bare ett \u00e5r er tillat.')])\n\n return self.cleaned_data\n\n\nclass MembershipSettingsForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(MembershipSettingsForm, self).__init__(*args, **kwargs)\n self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'\n\n class Meta(object):\n model = OnlineUser\n fields = ['field_of_study', 'started_date']\n\n widgets = {\n 'started_date': forms.TextInput(attrs={'placeholder': 'YYYY-MM-DD'}),\n }\n\n\nclass InternalServicesForm(forms.Form):\n ow4_password = forms.CharField(widget=forms.PasswordInput(), label=_(u\"Online passord\"))\n services_password = forms.CharField(widget=forms.PasswordInput(), label=_(u\"\u00d8nsket service passord\"))\n current_user = None\n\n def clean(self):\n super(InternalServicesForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # User object relation here\n user = auth.authenticate(username=self.current_user.username, password=cleaned_data['ow4_password'])\n\n if user is None or user.id != self.current_user.id:\n self._errors['ow4_password'] = self.error_class([_(u\"Passordet er ikke korrekt.\")])\n\n return cleaned_data\n", "path": "apps/profiles/forms.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport re\n\nfrom django import forms\nfrom django.contrib import auth\nfrom django.utils.translation import ugettext as _\n\nfrom apps.authentication.models import OnlineUser, Position\nfrom apps.profiles.models import Privacy\n\nZIP_CODE_VALIDATION_ERROR = \"Postnummer m\u00e5 best\u00e5 av fire siffer.\"\n\n\nclass ProfileForm(forms.ModelForm):\n class Meta(object):\n model = OnlineUser\n\n fields = [\n 'nickname',\n 'website',\n 'phone_number',\n 'address',\n 'zip_code',\n 'allergies',\n 'compiled',\n 'bio',\n 'gender',\n 'github',\n 'linkedin'\n ]\n widgets = {\n 'allergies': forms.Textarea(attrs={'id': 'allergies'}),\n 'gender': forms.Select(attrs={'class': 'form-control', 'id': 'gender'}),\n 'bio': forms.Textarea(attrs={'id': 'bio'}),\n 'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}),\n }\n\n def clean_zip_code(self):\n zip_code = self.cleaned_data['zip_code']\n if not zip_code or not re.match(r'^\\d{4}$', zip_code):\n self.add_error('zip_code', ZIP_CODE_VALIDATION_ERROR)\n\n return zip_code\n\n\nclass PrivacyForm(forms.ModelForm):\n class Meta(object):\n model = Privacy\n exclude = ['user', 'expose_nickname']\n\n\nclass MailSettingsForm(forms.ModelForm):\n class Meta(object):\n model = OnlineUser\n 
fields = ['infomail', ]\n\n\nclass PositionForm(forms.ModelForm):\n class Meta(object):\n model = Position\n exclude = ['user']\n widgets = {\n 'committee': forms.Select(attrs={'class': 'form-control'}),\n 'position': forms.Select(attrs={'class': 'form-control'}),\n }\n\n def clean(self):\n super(PositionForm, self).clean()\n\n range_compiler = re.compile(r'\\d{4}-\\d{4}')\n year_range = self.cleaned_data['period']\n\n # If it doesn't match the format YYYY-YYYY\n if not range_compiler.match(year_range):\n self._errors['period'] = self.error_class(\n [_('Feil format. Dobbelsjekk at input er p\u00e5 formatet YYYY-YYYY.')]\n )\n return self.cleaned_data\n\n years = year_range.split('-')\n\n # If somewhat they fucked up input, we don't want None-shit after the split.\n if not years[0] or not years[1]:\n self._errors['period'] = self.error_class([_('Feil format. Dobbelsjekk input.')])\n return self.cleaned_data\n\n # If first year is larger than latter, or the diff is more than one, fail.\n if (int(years[0]) > int(years[1])) or (int(years[1]) - int(years[0])) > 1:\n self._errors['period'] = self.error_class([_('Ikke gyldig \u00e5rs-intervall. Bare ett \u00e5r er tillat.')])\n\n return self.cleaned_data\n\n\nclass MembershipSettingsForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(MembershipSettingsForm, self).__init__(*args, **kwargs)\n self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'\n\n class Meta(object):\n model = OnlineUser\n fields = ['field_of_study', 'started_date']\n\n widgets = {\n 'started_date': forms.TextInput(attrs={'placeholder': 'YYYY-MM-DD'}),\n }\n\n\nclass InternalServicesForm(forms.Form):\n ow4_password = forms.CharField(widget=forms.PasswordInput(), label=_(u\"Online passord\"))\n services_password = forms.CharField(widget=forms.PasswordInput(), label=_(u\"\u00d8nsket service passord\"))\n current_user = None\n\n def clean(self):\n super(InternalServicesForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # User object relation here\n user = auth.authenticate(username=self.current_user.username, password=cleaned_data['ow4_password'])\n\n if user is None or user.id != self.current_user.id:\n self._errors['ow4_password'] = self.error_class([_(u\"Passordet er ikke korrekt.\")])\n\n return cleaned_data\n", "path": "apps/profiles/forms.py"}]}
1,908
145
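The patch above fixes two independent defects in `clean_zip_code`; a quick interpreter session with assumed inputs shows both.

```python
import re

# 1. The old pattern was unanchored, so trailing junk slipped through:
print(bool(re.match(r'\d{4}', '7050abc')))    # True  (old check accepts it)
print(bool(re.match(r'^\d{4}$', '7050abc')))  # False (anchored check rejects it)
print(bool(re.match(r'^\d{4}$', '7050')))     # True  (valid code still passes)

# 2. The old clean_zip_code never returned a value. Django replaces
#    cleaned_data['zip_code'] with whatever the clean_<field> method returns,
#    so every submit stored None, which is why the profile overview showed
#    "None Ugyldig postnummer".
```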
gh_patches_debug_4927
rasdani/github-patches
git_diff
falconry__falcon-1997
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Code listing for app.py in "look” Tutorial has a bug In the [Serving images](https://falcon.readthedocs.io/en/stable/user/tutorial.html#serving-images) section, the code listing for `app.py` tries to import the `images` module as `import images`. I believe this should be `from look import images` or depending on one's preferences `import look.images` and refactor references to `images` as `look.images`. I prefer the former: ```python import os import falcon from look import images def create_app(image_store): api = falcon.API() api.add_route('/images', images.Collection(image_store)) api.add_route('/images/{name}', images.Item(image_store)) return api def get_app(): storage_path = os.environ.get('LOOK_STORAGE_PATH', '.') image_store = images.ImageStore(storage_path) return create_app(image_store) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `falcon/media/validators/jsonschema.py` Content: ``` 1 from functools import wraps 2 from inspect import iscoroutinefunction 3 4 import falcon 5 6 try: 7 import jsonschema 8 except ImportError: # pragma: nocover 9 pass 10 11 12 def validate(req_schema=None, resp_schema=None, is_async=False): 13 """Validate ``req.media`` using JSON Schema. 14 15 This decorator provides standard JSON Schema validation via the 16 ``jsonschema`` package available from PyPI. Semantic validation via 17 the *format* keyword is enabled for the default checkers implemented 18 by ``jsonschema.FormatChecker``. 19 20 Note: 21 The `jsonschema`` package must be installed separately in order to use 22 this decorator, as Falcon does not install it by default. 23 24 See `json-schema.org <http://json-schema.org/>`_ for more 25 information on defining a compatible dictionary. 26 27 Keyword Args: 28 req_schema (dict): A dictionary that follows the JSON 29 Schema specification. The request will be validated against this 30 schema. 31 resp_schema (dict): A dictionary that follows the JSON 32 Schema specification. The response will be validated against this 33 schema. 34 is_async (bool): Set to ``True`` for ASGI apps to provide a hint that 35 the decorated responder is a coroutine function (i.e., that it 36 is defined with ``async def``) or that it returns an awaitable 37 coroutine object. 38 39 Normally, when the function source is declared using ``async def``, 40 the resulting function object is flagged to indicate it returns a 41 coroutine when invoked, and this can be automatically detected. 42 However, it is possible to use a regular function to return an 43 awaitable coroutine object, in which case a hint is required to let 44 the framework know what to expect. Also, a hint is always required 45 when using a cythonized coroutine function, since Cython does not 46 flag them in a way that can be detected in advance, even when the 47 function is declared using ``async def``. 48 49 Example: 50 51 .. tabs:: 52 53 .. tab:: WSGI 54 55 .. code:: python 56 57 from falcon.media.validators import jsonschema 58 59 # -- snip -- 60 61 @jsonschema.validate(my_post_schema) 62 def on_post(self, req, resp): 63 64 # -- snip -- 65 66 .. tab:: ASGI 67 68 .. code:: python 69 70 from falcon.media.validators import jsonschema 71 72 # -- snip -- 73 74 @jsonschema.validate(my_post_schema) 75 async def on_post(self, req, resp): 76 77 # -- snip -- 78 79 .. tab:: ASGI (Cythonized App) 80 81 .. 
code:: python 82 83 from falcon.media.validators import jsonschema 84 85 # -- snip -- 86 87 @jsonschema.validate(my_post_schema, is_async=True) 88 async def on_post(self, req, resp): 89 90 # -- snip -- 91 92 """ 93 94 def decorator(func): 95 if iscoroutinefunction(func) or is_async: 96 return _validate_async(func, req_schema, resp_schema) 97 98 return _validate(func, req_schema, resp_schema) 99 100 return decorator 101 102 103 def _validate(func, req_schema=None, resp_schema=None): 104 @wraps(func) 105 def wrapper(self, req, resp, *args, **kwargs): 106 if req_schema is not None: 107 try: 108 jsonschema.validate( 109 req.media, req_schema, format_checker=jsonschema.FormatChecker() 110 ) 111 except jsonschema.ValidationError as e: 112 raise falcon.HTTPBadRequest( 113 title='Request data failed validation', description=e.message 114 ) 115 116 result = func(self, req, resp, *args, **kwargs) 117 118 if resp_schema is not None: 119 try: 120 jsonschema.validate( 121 resp.media, resp_schema, format_checker=jsonschema.FormatChecker() 122 ) 123 except jsonschema.ValidationError: 124 raise falcon.HTTPInternalServerError( 125 title='Response data failed validation' 126 # Do not return 'e.message' in the response to 127 # prevent info about possible internal response 128 # formatting bugs from leaking out to users. 129 ) 130 131 return result 132 133 return wrapper 134 135 136 def _validate_async(func, req_schema=None, resp_schema=None): 137 @wraps(func) 138 async def wrapper(self, req, resp, *args, **kwargs): 139 if req_schema is not None: 140 m = await req.get_media() 141 142 try: 143 jsonschema.validate( 144 m, req_schema, format_checker=jsonschema.FormatChecker() 145 ) 146 except jsonschema.ValidationError as e: 147 raise falcon.HTTPBadRequest( 148 title='Request data failed validation', description=e.message 149 ) 150 151 result = await func(self, req, resp, *args, **kwargs) 152 153 if resp_schema is not None: 154 try: 155 jsonschema.validate( 156 resp.media, resp_schema, format_checker=jsonschema.FormatChecker() 157 ) 158 except jsonschema.ValidationError: 159 raise falcon.HTTPInternalServerError( 160 title='Response data failed validation' 161 # Do not return 'e.message' in the response to 162 # prevent info about possible internal response 163 # formatting bugs from leaking out to users. 164 ) 165 166 return result 167 168 return wrapper 169 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/falcon/media/validators/jsonschema.py b/falcon/media/validators/jsonschema.py
--- a/falcon/media/validators/jsonschema.py
+++ b/falcon/media/validators/jsonschema.py
@@ -18,7 +18,7 @@
     by ``jsonschema.FormatChecker``.
 
     Note:
-        The `jsonschema`` package must be installed separately in order to use
+        The ``jsonschema`` package must be installed separately in order to use
         this decorator, as Falcon does not install it by default.
 
     See `json-schema.org <http://json-schema.org/>`_ for more
{"golden_diff": "diff --git a/falcon/media/validators/jsonschema.py b/falcon/media/validators/jsonschema.py\n--- a/falcon/media/validators/jsonschema.py\n+++ b/falcon/media/validators/jsonschema.py\n@@ -18,7 +18,7 @@\n by ``jsonschema.FormatChecker``.\n \n Note:\n- The `jsonschema`` package must be installed separately in order to use\n+ The ``jsonschema`` package must be installed separately in order to use\n this decorator, as Falcon does not install it by default.\n \n See `json-schema.org <http://json-schema.org/>`_ for more\n", "issue": "Code listing for app.py in \"look\u201d Tutorial has a bug\nIn the [Serving images](https://falcon.readthedocs.io/en/stable/user/tutorial.html#serving-images) section, the code listing for `app.py` tries to import the `images` module as `import images`. I believe this should be `from look import images` or depending on one's preferences `import look.images` and refactor references to `images` as `look.images`. I prefer the former:\r\n\r\n```python\r\nimport os\r\n\r\nimport falcon\r\n\r\nfrom look import images\r\n\r\n\r\ndef create_app(image_store):\r\n api = falcon.API()\r\n api.add_route('/images', images.Collection(image_store))\r\n api.add_route('/images/{name}', images.Item(image_store))\r\n return api\r\n\r\n\r\ndef get_app():\r\n storage_path = os.environ.get('LOOK_STORAGE_PATH', '.')\r\n image_store = images.ImageStore(storage_path)\r\n return create_app(image_store)\r\n```\r\n\n", "before_files": [{"content": "from functools import wraps\nfrom inspect import iscoroutinefunction\n\nimport falcon\n\ntry:\n import jsonschema\nexcept ImportError: # pragma: nocover\n pass\n\n\ndef validate(req_schema=None, resp_schema=None, is_async=False):\n \"\"\"Validate ``req.media`` using JSON Schema.\n\n This decorator provides standard JSON Schema validation via the\n ``jsonschema`` package available from PyPI. Semantic validation via\n the *format* keyword is enabled for the default checkers implemented\n by ``jsonschema.FormatChecker``.\n\n Note:\n The `jsonschema`` package must be installed separately in order to use\n this decorator, as Falcon does not install it by default.\n\n See `json-schema.org <http://json-schema.org/>`_ for more\n information on defining a compatible dictionary.\n\n Keyword Args:\n req_schema (dict): A dictionary that follows the JSON\n Schema specification. The request will be validated against this\n schema.\n resp_schema (dict): A dictionary that follows the JSON\n Schema specification. The response will be validated against this\n schema.\n is_async (bool): Set to ``True`` for ASGI apps to provide a hint that\n the decorated responder is a coroutine function (i.e., that it\n is defined with ``async def``) or that it returns an awaitable\n coroutine object.\n\n Normally, when the function source is declared using ``async def``,\n the resulting function object is flagged to indicate it returns a\n coroutine when invoked, and this can be automatically detected.\n However, it is possible to use a regular function to return an\n awaitable coroutine object, in which case a hint is required to let\n the framework know what to expect. Also, a hint is always required\n when using a cythonized coroutine function, since Cython does not\n flag them in a way that can be detected in advance, even when the\n function is declared using ``async def``.\n\n Example:\n\n .. tabs::\n\n .. tab:: WSGI\n\n .. 
code:: python\n\n from falcon.media.validators import jsonschema\n\n # -- snip --\n\n @jsonschema.validate(my_post_schema)\n def on_post(self, req, resp):\n\n # -- snip --\n\n .. tab:: ASGI\n\n .. code:: python\n\n from falcon.media.validators import jsonschema\n\n # -- snip --\n\n @jsonschema.validate(my_post_schema)\n async def on_post(self, req, resp):\n\n # -- snip --\n\n .. tab:: ASGI (Cythonized App)\n\n .. code:: python\n\n from falcon.media.validators import jsonschema\n\n # -- snip --\n\n @jsonschema.validate(my_post_schema, is_async=True)\n async def on_post(self, req, resp):\n\n # -- snip --\n\n \"\"\"\n\n def decorator(func):\n if iscoroutinefunction(func) or is_async:\n return _validate_async(func, req_schema, resp_schema)\n\n return _validate(func, req_schema, resp_schema)\n\n return decorator\n\n\ndef _validate(func, req_schema=None, resp_schema=None):\n @wraps(func)\n def wrapper(self, req, resp, *args, **kwargs):\n if req_schema is not None:\n try:\n jsonschema.validate(\n req.media, req_schema, format_checker=jsonschema.FormatChecker()\n )\n except jsonschema.ValidationError as e:\n raise falcon.HTTPBadRequest(\n title='Request data failed validation', description=e.message\n )\n\n result = func(self, req, resp, *args, **kwargs)\n\n if resp_schema is not None:\n try:\n jsonschema.validate(\n resp.media, resp_schema, format_checker=jsonschema.FormatChecker()\n )\n except jsonschema.ValidationError:\n raise falcon.HTTPInternalServerError(\n title='Response data failed validation'\n # Do not return 'e.message' in the response to\n # prevent info about possible internal response\n # formatting bugs from leaking out to users.\n )\n\n return result\n\n return wrapper\n\n\ndef _validate_async(func, req_schema=None, resp_schema=None):\n @wraps(func)\n async def wrapper(self, req, resp, *args, **kwargs):\n if req_schema is not None:\n m = await req.get_media()\n\n try:\n jsonschema.validate(\n m, req_schema, format_checker=jsonschema.FormatChecker()\n )\n except jsonschema.ValidationError as e:\n raise falcon.HTTPBadRequest(\n title='Request data failed validation', description=e.message\n )\n\n result = await func(self, req, resp, *args, **kwargs)\n\n if resp_schema is not None:\n try:\n jsonschema.validate(\n resp.media, resp_schema, format_checker=jsonschema.FormatChecker()\n )\n except jsonschema.ValidationError:\n raise falcon.HTTPInternalServerError(\n title='Response data failed validation'\n # Do not return 'e.message' in the response to\n # prevent info about possible internal response\n # formatting bugs from leaking out to users.\n )\n\n return result\n\n return wrapper\n", "path": "falcon/media/validators/jsonschema.py"}], "after_files": [{"content": "from functools import wraps\nfrom inspect import iscoroutinefunction\n\nimport falcon\n\ntry:\n import jsonschema\nexcept ImportError: # pragma: nocover\n pass\n\n\ndef validate(req_schema=None, resp_schema=None, is_async=False):\n \"\"\"Validate ``req.media`` using JSON Schema.\n\n This decorator provides standard JSON Schema validation via the\n ``jsonschema`` package available from PyPI. 
Semantic validation via\n the *format* keyword is enabled for the default checkers implemented\n by ``jsonschema.FormatChecker``.\n\n Note:\n The ``jsonschema`` package must be installed separately in order to use\n this decorator, as Falcon does not install it by default.\n\n See `json-schema.org <http://json-schema.org/>`_ for more\n information on defining a compatible dictionary.\n\n Keyword Args:\n req_schema (dict): A dictionary that follows the JSON\n Schema specification. The request will be validated against this\n schema.\n resp_schema (dict): A dictionary that follows the JSON\n Schema specification. The response will be validated against this\n schema.\n is_async (bool): Set to ``True`` for ASGI apps to provide a hint that\n the decorated responder is a coroutine function (i.e., that it\n is defined with ``async def``) or that it returns an awaitable\n coroutine object.\n\n Normally, when the function source is declared using ``async def``,\n the resulting function object is flagged to indicate it returns a\n coroutine when invoked, and this can be automatically detected.\n However, it is possible to use a regular function to return an\n awaitable coroutine object, in which case a hint is required to let\n the framework know what to expect. Also, a hint is always required\n when using a cythonized coroutine function, since Cython does not\n flag them in a way that can be detected in advance, even when the\n function is declared using ``async def``.\n\n Example:\n\n .. tabs::\n\n .. tab:: WSGI\n\n .. code:: python\n\n from falcon.media.validators import jsonschema\n\n # -- snip --\n\n @jsonschema.validate(my_post_schema)\n def on_post(self, req, resp):\n\n # -- snip --\n\n .. tab:: ASGI\n\n .. code:: python\n\n from falcon.media.validators import jsonschema\n\n # -- snip --\n\n @jsonschema.validate(my_post_schema)\n async def on_post(self, req, resp):\n\n # -- snip --\n\n .. tab:: ASGI (Cythonized App)\n\n .. 
code:: python\n\n from falcon.media.validators import jsonschema\n\n # -- snip --\n\n @jsonschema.validate(my_post_schema, is_async=True)\n async def on_post(self, req, resp):\n\n # -- snip --\n\n \"\"\"\n\n def decorator(func):\n if iscoroutinefunction(func) or is_async:\n return _validate_async(func, req_schema, resp_schema)\n\n return _validate(func, req_schema, resp_schema)\n\n return decorator\n\n\ndef _validate(func, req_schema=None, resp_schema=None):\n @wraps(func)\n def wrapper(self, req, resp, *args, **kwargs):\n if req_schema is not None:\n try:\n jsonschema.validate(\n req.media, req_schema, format_checker=jsonschema.FormatChecker()\n )\n except jsonschema.ValidationError as e:\n raise falcon.HTTPBadRequest(\n title='Request data failed validation', description=e.message\n )\n\n result = func(self, req, resp, *args, **kwargs)\n\n if resp_schema is not None:\n try:\n jsonschema.validate(\n resp.media, resp_schema, format_checker=jsonschema.FormatChecker()\n )\n except jsonschema.ValidationError:\n raise falcon.HTTPInternalServerError(\n title='Response data failed validation'\n # Do not return 'e.message' in the response to\n # prevent info about possible internal response\n # formatting bugs from leaking out to users.\n )\n\n return result\n\n return wrapper\n\n\ndef _validate_async(func, req_schema=None, resp_schema=None):\n @wraps(func)\n async def wrapper(self, req, resp, *args, **kwargs):\n if req_schema is not None:\n m = await req.get_media()\n\n try:\n jsonschema.validate(\n m, req_schema, format_checker=jsonschema.FormatChecker()\n )\n except jsonschema.ValidationError as e:\n raise falcon.HTTPBadRequest(\n title='Request data failed validation', description=e.message\n )\n\n result = await func(self, req, resp, *args, **kwargs)\n\n if resp_schema is not None:\n try:\n jsonschema.validate(\n resp.media, resp_schema, format_checker=jsonschema.FormatChecker()\n )\n except jsonschema.ValidationError:\n raise falcon.HTTPInternalServerError(\n title='Response data failed validation'\n # Do not return 'e.message' in the response to\n # prevent info about possible internal response\n # formatting bugs from leaking out to users.\n )\n\n return result\n\n return wrapper\n", "path": "falcon/media/validators/jsonschema.py"}]}
1,975
135
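For context on the module patched above: the decorator delegates to `jsonschema.validate` and converts a `ValidationError` into a 400 response. A standalone sketch of that underlying call, using an assumed schema:

```python
import jsonschema

schema = {
    "type": "object",
    "properties": {"name": {"type": "string"}},
    "required": ["name"],
}

jsonschema.validate({"name": "kitten"}, schema)   # passes silently

try:
    jsonschema.validate({}, schema)               # required key is missing
except jsonschema.ValidationError as exc:
    print(exc.message)                            # 'name' is a required property
```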
gh_patches_debug_28821
rasdani/github-patches
git_diff
huggingface__text-generation-inference-114
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add typical sampling HF has TypicalWarper and `typical_p`. Would be nice to be able to pass that. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `server/text_generation_server/utils/tokens.py` Content: ``` 1 import re 2 import torch 3 4 from transformers import ( 5 LogitsProcessorList, 6 TemperatureLogitsWarper, 7 TopKLogitsWarper, 8 TopPLogitsWarper, 9 RepetitionPenaltyLogitsProcessor, 10 PreTrainedTokenizerBase, 11 ) 12 from typing import List, Tuple, Optional 13 14 from text_generation_server.pb import generate_pb2 15 from text_generation_server.pb.generate_pb2 import FinishReason 16 from text_generation_server.utils.watermark import WatermarkLogitsProcessor 17 18 19 class Sampling: 20 def __init__(self, seed: int, device: str = "cpu"): 21 self.generator = torch.Generator(device) 22 self.generator.manual_seed(seed) 23 self.seed = seed 24 25 def __call__(self, logits): 26 probs = torch.nn.functional.softmax(logits) 27 next_tokens = torch.multinomial(probs, num_samples=1, generator=self.generator) 28 return next_tokens 29 30 31 class Greedy: 32 def __call__(self, logits): 33 return logits.argmax() 34 35 36 class NextTokenChooser: 37 def __init__( 38 self, 39 watermark=False, 40 temperature=1.0, 41 repetition_penalty=1.0, 42 top_k=None, 43 top_p=None, 44 do_sample=False, 45 seed=0, 46 device="cpu", 47 ): 48 warpers = LogitsProcessorList() 49 # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files 50 # all samplers can be found in `generation_utils_samplers.py` 51 sampling = do_sample 52 53 if watermark: 54 warpers.append(WatermarkLogitsProcessor(device=device)) 55 if repetition_penalty is not None and repetition_penalty != 1.0: 56 warpers.append(RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty)) 57 if temperature is not None and temperature != 1.0: 58 temperature = float(temperature) 59 warpers.append(TemperatureLogitsWarper(temperature)) 60 sampling = True 61 if top_k is not None and top_k != 0: 62 warpers.append(TopKLogitsWarper(top_k=top_k)) 63 sampling = True 64 if top_p is not None and top_p < 1.0: 65 warpers.append(TopPLogitsWarper(top_p=top_p)) 66 sampling = True 67 68 self.warpers = warpers 69 self.choice = Sampling(seed, device) if sampling else Greedy() 70 71 def __call__(self, input_ids, scores): 72 # Warp logits 73 scores = self.warpers(input_ids, scores) 74 75 # Compute logprobs 76 logprobs = torch.log_softmax(scores, -1) 77 78 # Choose tokens 79 next_id = self.choice(scores[-1]) 80 81 return next_id.view(1, 1), logprobs 82 83 @classmethod 84 def from_pb( 85 cls, 86 pb: generate_pb2.NextTokenChooserParameters, 87 device: torch.device, 88 ) -> "NextTokenChooser": 89 return NextTokenChooser( 90 watermark=pb.watermark, 91 temperature=pb.temperature, 92 repetition_penalty=pb.repetition_penalty, 93 top_k=pb.top_k, 94 top_p=pb.top_p, 95 do_sample=pb.do_sample, 96 seed=pb.seed, 97 device=device, 98 ) 99 100 101 class StopSequenceCriteria: 102 def __init__(self, stop_sequence: str): 103 self.regex = re.compile(f".*{stop_sequence}$") 104 105 def __call__(self, output: str) -> bool: 106 if self.regex.findall(output): 107 return True 108 return False 109 110 111 class StoppingCriteria: 112 def __init__( 113 self, 114 eos_token_id: int, 115 stop_sequence_criterias: List[StopSequenceCriteria], 116 max_new_tokens=20, 
117 ): 118 self.eos_token_id = eos_token_id 119 self.stop_sequence_criterias = stop_sequence_criterias 120 self.max_new_tokens = max_new_tokens 121 self.current_tokens = 0 122 self.current_output = "" 123 124 def __call__(self, last_token: int, last_output: str) -> Tuple[bool, Optional[str]]: 125 self.current_tokens += 1 126 if self.current_tokens >= self.max_new_tokens: 127 return True, FinishReason.FINISH_REASON_LENGTH 128 129 if last_token == self.eos_token_id: 130 return True, FinishReason.FINISH_REASON_EOS_TOKEN 131 132 self.current_output += last_output 133 for stop_sequence_criteria in self.stop_sequence_criterias: 134 if stop_sequence_criteria(self.current_output): 135 return True, FinishReason.FINISH_REASON_STOP_SEQUENCE 136 137 return False, None 138 139 @classmethod 140 def from_pb( 141 cls, 142 pb: generate_pb2.StoppingCriteriaParameters, 143 tokenizer: PreTrainedTokenizerBase, 144 ) -> "StoppingCriteria": 145 stop_sequence_criterias = [ 146 StopSequenceCriteria(sequence) for sequence in pb.stop_sequences 147 ] 148 return StoppingCriteria( 149 tokenizer.eos_token_id, stop_sequence_criterias, pb.max_new_tokens 150 ) 151 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/server/text_generation_server/utils/tokens.py b/server/text_generation_server/utils/tokens.py
--- a/server/text_generation_server/utils/tokens.py
+++ b/server/text_generation_server/utils/tokens.py
@@ -6,6 +6,7 @@
     TemperatureLogitsWarper,
     TopKLogitsWarper,
     TopPLogitsWarper,
+    TypicalLogitsWarper,
     RepetitionPenaltyLogitsProcessor,
     PreTrainedTokenizerBase,
 )
@@ -41,6 +42,7 @@
         repetition_penalty=1.0,
         top_k=None,
         top_p=None,
+        typical_p=None,
         do_sample=False,
         seed=0,
         device="cpu",
@@ -64,6 +66,9 @@
         if top_p is not None and top_p < 1.0:
             warpers.append(TopPLogitsWarper(top_p=top_p))
             sampling = True
+        if typical_p is not None and typical_p < 1.0:
+            warpers.append(TypicalLogitsWarper(mass=typical_p))
+            sampling = True
 
         self.warpers = warpers
         self.choice = Sampling(seed, device) if sampling else Greedy()
@@ -92,6 +97,7 @@
             repetition_penalty=pb.repetition_penalty,
             top_k=pb.top_k,
             top_p=pb.top_p,
+            typical_p=pb.typical_p,
             do_sample=pb.do_sample,
             seed=pb.seed,
             device=device,
{"golden_diff": "diff --git a/server/text_generation_server/utils/tokens.py b/server/text_generation_server/utils/tokens.py\n--- a/server/text_generation_server/utils/tokens.py\n+++ b/server/text_generation_server/utils/tokens.py\n@@ -6,6 +6,7 @@\n TemperatureLogitsWarper,\n TopKLogitsWarper,\n TopPLogitsWarper,\n+ TypicalLogitsWarper,\n RepetitionPenaltyLogitsProcessor,\n PreTrainedTokenizerBase,\n )\n@@ -41,6 +42,7 @@\n repetition_penalty=1.0,\n top_k=None,\n top_p=None,\n+ typical_p=None,\n do_sample=False,\n seed=0,\n device=\"cpu\",\n@@ -64,6 +66,9 @@\n if top_p is not None and top_p < 1.0:\n warpers.append(TopPLogitsWarper(top_p=top_p))\n sampling = True\n+ if typical_p is not None and typical_p < 1.0:\n+ warpers.append(TypicalLogitsWarper(mass=typical_p))\n+ sampling = True\n \n self.warpers = warpers\n self.choice = Sampling(seed, device) if sampling else Greedy()\n@@ -92,6 +97,7 @@\n repetition_penalty=pb.repetition_penalty,\n top_k=pb.top_k,\n top_p=pb.top_p,\n+ typical_p=pb.typical_p,\n do_sample=pb.do_sample,\n seed=pb.seed,\n device=device,\n", "issue": "Add typical sampling\nHF has TypicalWarper and `typical_p`. Would be nice to be able to pass that.\n", "before_files": [{"content": "import re\nimport torch\n\nfrom transformers import (\n LogitsProcessorList,\n TemperatureLogitsWarper,\n TopKLogitsWarper,\n TopPLogitsWarper,\n RepetitionPenaltyLogitsProcessor,\n PreTrainedTokenizerBase,\n)\nfrom typing import List, Tuple, Optional\n\nfrom text_generation_server.pb import generate_pb2\nfrom text_generation_server.pb.generate_pb2 import FinishReason\nfrom text_generation_server.utils.watermark import WatermarkLogitsProcessor\n\n\nclass Sampling:\n def __init__(self, seed: int, device: str = \"cpu\"):\n self.generator = torch.Generator(device)\n self.generator.manual_seed(seed)\n self.seed = seed\n\n def __call__(self, logits):\n probs = torch.nn.functional.softmax(logits)\n next_tokens = torch.multinomial(probs, num_samples=1, generator=self.generator)\n return next_tokens\n\n\nclass Greedy:\n def __call__(self, logits):\n return logits.argmax()\n\n\nclass NextTokenChooser:\n def __init__(\n self,\n watermark=False,\n temperature=1.0,\n repetition_penalty=1.0,\n top_k=None,\n top_p=None,\n do_sample=False,\n seed=0,\n device=\"cpu\",\n ):\n warpers = LogitsProcessorList()\n # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files\n # all samplers can be found in `generation_utils_samplers.py`\n sampling = do_sample\n\n if watermark:\n warpers.append(WatermarkLogitsProcessor(device=device))\n if repetition_penalty is not None and repetition_penalty != 1.0:\n warpers.append(RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty))\n if temperature is not None and temperature != 1.0:\n temperature = float(temperature)\n warpers.append(TemperatureLogitsWarper(temperature))\n sampling = True\n if top_k is not None and top_k != 0:\n warpers.append(TopKLogitsWarper(top_k=top_k))\n sampling = True\n if top_p is not None and top_p < 1.0:\n warpers.append(TopPLogitsWarper(top_p=top_p))\n sampling = True\n\n self.warpers = warpers\n self.choice = Sampling(seed, device) if sampling else Greedy()\n\n def __call__(self, input_ids, scores):\n # Warp logits\n scores = self.warpers(input_ids, scores)\n\n # Compute logprobs\n logprobs = torch.log_softmax(scores, -1)\n\n # Choose tokens\n next_id = self.choice(scores[-1])\n\n return next_id.view(1, 1), logprobs\n\n @classmethod\n def from_pb(\n cls,\n pb: 
generate_pb2.NextTokenChooserParameters,\n device: torch.device,\n ) -> \"NextTokenChooser\":\n return NextTokenChooser(\n watermark=pb.watermark,\n temperature=pb.temperature,\n repetition_penalty=pb.repetition_penalty,\n top_k=pb.top_k,\n top_p=pb.top_p,\n do_sample=pb.do_sample,\n seed=pb.seed,\n device=device,\n )\n\n\nclass StopSequenceCriteria:\n def __init__(self, stop_sequence: str):\n self.regex = re.compile(f\".*{stop_sequence}$\")\n\n def __call__(self, output: str) -> bool:\n if self.regex.findall(output):\n return True\n return False\n\n\nclass StoppingCriteria:\n def __init__(\n self,\n eos_token_id: int,\n stop_sequence_criterias: List[StopSequenceCriteria],\n max_new_tokens=20,\n ):\n self.eos_token_id = eos_token_id\n self.stop_sequence_criterias = stop_sequence_criterias\n self.max_new_tokens = max_new_tokens\n self.current_tokens = 0\n self.current_output = \"\"\n\n def __call__(self, last_token: int, last_output: str) -> Tuple[bool, Optional[str]]:\n self.current_tokens += 1\n if self.current_tokens >= self.max_new_tokens:\n return True, FinishReason.FINISH_REASON_LENGTH\n\n if last_token == self.eos_token_id:\n return True, FinishReason.FINISH_REASON_EOS_TOKEN\n\n self.current_output += last_output\n for stop_sequence_criteria in self.stop_sequence_criterias:\n if stop_sequence_criteria(self.current_output):\n return True, FinishReason.FINISH_REASON_STOP_SEQUENCE\n\n return False, None\n\n @classmethod\n def from_pb(\n cls,\n pb: generate_pb2.StoppingCriteriaParameters,\n tokenizer: PreTrainedTokenizerBase,\n ) -> \"StoppingCriteria\":\n stop_sequence_criterias = [\n StopSequenceCriteria(sequence) for sequence in pb.stop_sequences\n ]\n return StoppingCriteria(\n tokenizer.eos_token_id, stop_sequence_criterias, pb.max_new_tokens\n )\n", "path": "server/text_generation_server/utils/tokens.py"}], "after_files": [{"content": "import re\nimport torch\n\nfrom transformers import (\n LogitsProcessorList,\n TemperatureLogitsWarper,\n TopKLogitsWarper,\n TopPLogitsWarper,\n TypicalLogitsWarper,\n RepetitionPenaltyLogitsProcessor,\n PreTrainedTokenizerBase,\n)\nfrom typing import List, Tuple, Optional\n\nfrom text_generation_server.pb import generate_pb2\nfrom text_generation_server.pb.generate_pb2 import FinishReason\nfrom text_generation_server.utils.watermark import WatermarkLogitsProcessor\n\n\nclass Sampling:\n def __init__(self, seed: int, device: str = \"cpu\"):\n self.generator = torch.Generator(device)\n self.generator.manual_seed(seed)\n self.seed = seed\n\n def __call__(self, logits):\n probs = torch.nn.functional.softmax(logits)\n next_tokens = torch.multinomial(probs, num_samples=1, generator=self.generator)\n return next_tokens\n\n\nclass Greedy:\n def __call__(self, logits):\n return logits.argmax()\n\n\nclass NextTokenChooser:\n def __init__(\n self,\n watermark=False,\n temperature=1.0,\n repetition_penalty=1.0,\n top_k=None,\n top_p=None,\n typical_p=None,\n do_sample=False,\n seed=0,\n device=\"cpu\",\n ):\n warpers = LogitsProcessorList()\n # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files\n # all samplers can be found in `generation_utils_samplers.py`\n sampling = do_sample\n\n if watermark:\n warpers.append(WatermarkLogitsProcessor(device=device))\n if repetition_penalty is not None and repetition_penalty != 1.0:\n warpers.append(RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty))\n if temperature is not None and temperature != 1.0:\n temperature = float(temperature)\n 
warpers.append(TemperatureLogitsWarper(temperature))\n sampling = True\n if top_k is not None and top_k != 0:\n warpers.append(TopKLogitsWarper(top_k=top_k))\n sampling = True\n if top_p is not None and top_p < 1.0:\n warpers.append(TopPLogitsWarper(top_p=top_p))\n sampling = True\n if typical_p is not None and typical_p < 1.0:\n warpers.append(TypicalLogitsWarper(mass=typical_p))\n sampling = True\n\n self.warpers = warpers\n self.choice = Sampling(seed, device) if sampling else Greedy()\n\n def __call__(self, input_ids, scores):\n # Warp logits\n scores = self.warpers(input_ids, scores)\n\n # Compute logprobs\n logprobs = torch.log_softmax(scores, -1)\n\n # Choose tokens\n next_id = self.choice(scores[-1])\n\n return next_id.view(1, 1), logprobs\n\n @classmethod\n def from_pb(\n cls,\n pb: generate_pb2.NextTokenChooserParameters,\n device: torch.device,\n ) -> \"NextTokenChooser\":\n return NextTokenChooser(\n watermark=pb.watermark,\n temperature=pb.temperature,\n repetition_penalty=pb.repetition_penalty,\n top_k=pb.top_k,\n top_p=pb.top_p,\n typical_p=pb.typical_p,\n do_sample=pb.do_sample,\n seed=pb.seed,\n device=device,\n )\n\n\nclass StopSequenceCriteria:\n def __init__(self, stop_sequence: str):\n self.regex = re.compile(f\".*{stop_sequence}$\")\n\n def __call__(self, output: str) -> bool:\n if self.regex.findall(output):\n return True\n return False\n\n\nclass StoppingCriteria:\n def __init__(\n self,\n eos_token_id: int,\n stop_sequence_criterias: List[StopSequenceCriteria],\n max_new_tokens=20,\n ):\n self.eos_token_id = eos_token_id\n self.stop_sequence_criterias = stop_sequence_criterias\n self.max_new_tokens = max_new_tokens\n self.current_tokens = 0\n self.current_output = \"\"\n\n def __call__(self, last_token: int, last_output: str) -> Tuple[bool, Optional[str]]:\n self.current_tokens += 1\n if self.current_tokens >= self.max_new_tokens:\n return True, FinishReason.FINISH_REASON_LENGTH\n\n if last_token == self.eos_token_id:\n return True, FinishReason.FINISH_REASON_EOS_TOKEN\n\n self.current_output += last_output\n for stop_sequence_criteria in self.stop_sequence_criterias:\n if stop_sequence_criteria(self.current_output):\n return True, FinishReason.FINISH_REASON_STOP_SEQUENCE\n\n return False, None\n\n @classmethod\n def from_pb(\n cls,\n pb: generate_pb2.StoppingCriteriaParameters,\n tokenizer: PreTrainedTokenizerBase,\n ) -> \"StoppingCriteria\":\n stop_sequence_criterias = [\n StopSequenceCriteria(sequence) for sequence in pb.stop_sequences\n ]\n return StoppingCriteria(\n tokenizer.eos_token_id, stop_sequence_criterias, pb.max_new_tokens\n )\n", "path": "server/text_generation_server/utils/tokens.py"}]}
1,723
333
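For the typical-sampling record above, a minimal sketch of the warper chain the patch assembles, assuming a `transformers` release that ships `TypicalLogitsWarper`. The tensors and the 0.8/0.95 values are placeholders for illustration, not values from the dataset:

```python
# Sketch only: dummy logits and illustrative settings, not the server's code path.
import torch
from transformers import (
    LogitsProcessorList,
    TemperatureLogitsWarper,
    TypicalLogitsWarper,
)

typical_p = 0.95                       # would come from pb.typical_p in the patch
warpers = LogitsProcessorList()
warpers.append(TemperatureLogitsWarper(0.8))
if typical_p is not None and typical_p < 1.0:
    # `mass` is the share of locally typical probability mass to keep
    warpers.append(TypicalLogitsWarper(mass=typical_p))

input_ids = torch.tensor([[1, 2, 3]])  # placeholder prompt tokens
scores = torch.randn(1, 32000)         # placeholder next-token logits
warped = warpers(input_ids, scores)    # filtered logits, same shape as `scores`
print(warped.shape)                    # torch.Size([1, 32000])
```

Sampling then draws from the softmax of the warped logits, which is exactly what the record's seeded `Sampling` helper does with `torch.multinomial`.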
gh_patches_debug_4842
rasdani/github-patches
git_diff
plone__Products.CMFPlone-3972
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- TypeError thrown when during groups lookup The offending code is [here](https://github.com/plone/Products.CMFPlone/blob/308aa4d03ee6c0ce9d8119ce4c37955153f0bc6f/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py#L66). The traceback looks like this: ``` Traceback (innermost last): Module ZPublisher.WSGIPublisher, line 176, in transaction_pubevents Module ZPublisher.WSGIPublisher, line 385, in publish_module Module ZPublisher.WSGIPublisher, line 280, in publish Module ZPublisher.mapply, line 85, in mapply Module ZPublisher.WSGIPublisher, line 63, in call_object Module Products.CMFPlone.controlpanel.browser.usergroups_usermembership, line 57, in __call__ Module Products.CMFPlone.controlpanel.browser.usergroups_usermembership, line 54, in update Module Products.CMFPlone.controlpanel.browser.usergroups_usermembership, line 63, in getGroups TypeError: '<' not supported between instances of 'bool' and 'str' ``` The issue is that when there's a `None` value in the `groupResults` (which is anticipated in the sort code) the lambda returns `False` which fails to compare against the group title/name strings under Python 3. The list comprehension that defines `groupResults` should probably just filter out `None` values to avoid this issue. I'm not entirely sure what circumstances result in a `None` group value, but I am seeing it occur in a real world use case. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py` Content: ``` 1 from plone.base import PloneMessageFactory as _ 2 from Products.CMFCore.utils import getToolByName 3 from Products.CMFPlone.controlpanel.browser.usergroups import ( 4 UsersGroupsControlPanelView, 5 ) 6 from Products.CMFPlone.utils import normalizeString 7 from zExceptions import Forbidden 8 9 10 class UserMembershipControlPanel(UsersGroupsControlPanelView): 11 def update(self): 12 self.userid = getattr(self.request, "userid") 13 self.gtool = getToolByName(self, "portal_groups") 14 self.mtool = getToolByName(self, "portal_membership") 15 self.member = self.mtool.getMemberById(self.userid) 16 17 form = self.request.form 18 19 self.searchResults = [] 20 self.searchString = "" 21 self.newSearch = False 22 23 if form.get("form.submitted", False): 24 delete = form.get("delete", []) 25 if delete: 26 for groupname in delete: 27 self.gtool.removePrincipalFromGroup( 28 self.userid, groupname, self.request 29 ) 30 self.context.plone_utils.addPortalMessage(_("Changes made.")) 31 32 add = form.get("add", []) 33 if add: 34 for groupname in add: 35 group = self.gtool.getGroupById(groupname) 36 if "Manager" in group.getRoles() and not self.is_zope_manager: 37 raise Forbidden 38 39 self.gtool.addPrincipalToGroup(self.userid, groupname, self.request) 40 self.context.plone_utils.addPortalMessage(_("Changes made.")) 41 42 search = form.get("form.button.Search", None) is not None 43 findAll = ( 44 form.get("form.button.FindAll", None) is not None and not self.many_groups 45 ) 46 self.searchString = not findAll and form.get("searchstring", "") or "" 47 48 if findAll or not self.many_groups or self.searchString != "": 49 self.searchResults = self.getPotentialGroups(self.searchString) 50 51 if search or findAll: 52 self.newSearch = True 53 54 self.groups = self.getGroups() 55 56 def __call__(self): 57 self.update() 
58 return self.index() 59 60 def getGroups(self): 61 groupResults = [ 62 self.gtool.getGroupById(m) 63 for m in self.gtool.getGroupsForPrincipal(self.member) 64 ] 65 groupResults.sort( 66 key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName()) 67 ) 68 return [i for i in groupResults if i] 69 70 def getPotentialGroups(self, searchString): 71 ignoredGroups = [x.id for x in self.getGroups() if x is not None] 72 return self.membershipSearch( 73 searchString, searchUsers=False, ignore=ignoredGroups 74 ) 75 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py b/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py --- a/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py +++ b/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py @@ -63,7 +63,7 @@ for m in self.gtool.getGroupsForPrincipal(self.member) ] groupResults.sort( - key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName()) + key=lambda x: normalizeString(x.getGroupTitleOrName()) if x else '' ) return [i for i in groupResults if i]
{"golden_diff": "diff --git a/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py b/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py\n--- a/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py\n+++ b/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py\n@@ -63,7 +63,7 @@\n for m in self.gtool.getGroupsForPrincipal(self.member)\n ]\n groupResults.sort(\n- key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())\n+ key=lambda x: normalizeString(x.getGroupTitleOrName()) if x else ''\n )\n return [i for i in groupResults if i]\n", "issue": "TypeError thrown when during groups lookup\nThe offending code is [here](https://github.com/plone/Products.CMFPlone/blob/308aa4d03ee6c0ce9d8119ce4c37955153f0bc6f/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py#L66). The traceback looks like this:\r\n```\r\nTraceback (innermost last):\r\n Module ZPublisher.WSGIPublisher, line 176, in transaction_pubevents\r\n Module ZPublisher.WSGIPublisher, line 385, in publish_module\r\n Module ZPublisher.WSGIPublisher, line 280, in publish\r\n Module ZPublisher.mapply, line 85, in mapply\r\n Module ZPublisher.WSGIPublisher, line 63, in call_object\r\n Module Products.CMFPlone.controlpanel.browser.usergroups_usermembership, line 57, in __call__\r\n Module Products.CMFPlone.controlpanel.browser.usergroups_usermembership, line 54, in update\r\n Module Products.CMFPlone.controlpanel.browser.usergroups_usermembership, line 63, in getGroups\r\nTypeError: '<' not supported between instances of 'bool' and 'str'\r\n```\r\n\r\nThe issue is that when there's a `None` value in the `groupResults` (which is anticipated in the sort code) the lambda returns `False` which fails to compare against the group title/name strings under Python 3. The list comprehension that defines `groupResults` should probably just filter out `None` values to avoid this issue. 
I'm not entirely sure what circumstances result in a `None` group value, but I am seeing it occur in a real world use case.\n", "before_files": [{"content": "from plone.base import PloneMessageFactory as _\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.controlpanel.browser.usergroups import (\n UsersGroupsControlPanelView,\n)\nfrom Products.CMFPlone.utils import normalizeString\nfrom zExceptions import Forbidden\n\n\nclass UserMembershipControlPanel(UsersGroupsControlPanelView):\n def update(self):\n self.userid = getattr(self.request, \"userid\")\n self.gtool = getToolByName(self, \"portal_groups\")\n self.mtool = getToolByName(self, \"portal_membership\")\n self.member = self.mtool.getMemberById(self.userid)\n\n form = self.request.form\n\n self.searchResults = []\n self.searchString = \"\"\n self.newSearch = False\n\n if form.get(\"form.submitted\", False):\n delete = form.get(\"delete\", [])\n if delete:\n for groupname in delete:\n self.gtool.removePrincipalFromGroup(\n self.userid, groupname, self.request\n )\n self.context.plone_utils.addPortalMessage(_(\"Changes made.\"))\n\n add = form.get(\"add\", [])\n if add:\n for groupname in add:\n group = self.gtool.getGroupById(groupname)\n if \"Manager\" in group.getRoles() and not self.is_zope_manager:\n raise Forbidden\n\n self.gtool.addPrincipalToGroup(self.userid, groupname, self.request)\n self.context.plone_utils.addPortalMessage(_(\"Changes made.\"))\n\n search = form.get(\"form.button.Search\", None) is not None\n findAll = (\n form.get(\"form.button.FindAll\", None) is not None and not self.many_groups\n )\n self.searchString = not findAll and form.get(\"searchstring\", \"\") or \"\"\n\n if findAll or not self.many_groups or self.searchString != \"\":\n self.searchResults = self.getPotentialGroups(self.searchString)\n\n if search or findAll:\n self.newSearch = True\n\n self.groups = self.getGroups()\n\n def __call__(self):\n self.update()\n return self.index()\n\n def getGroups(self):\n groupResults = [\n self.gtool.getGroupById(m)\n for m in self.gtool.getGroupsForPrincipal(self.member)\n ]\n groupResults.sort(\n key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())\n )\n return [i for i in groupResults if i]\n\n def getPotentialGroups(self, searchString):\n ignoredGroups = [x.id for x in self.getGroups() if x is not None]\n return self.membershipSearch(\n searchString, searchUsers=False, ignore=ignoredGroups\n )\n", "path": "Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py"}], "after_files": [{"content": "from plone.base import PloneMessageFactory as _\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.controlpanel.browser.usergroups import (\n UsersGroupsControlPanelView,\n)\nfrom Products.CMFPlone.utils import normalizeString\nfrom zExceptions import Forbidden\n\n\nclass UserMembershipControlPanel(UsersGroupsControlPanelView):\n def update(self):\n self.userid = getattr(self.request, \"userid\")\n self.gtool = getToolByName(self, \"portal_groups\")\n self.mtool = getToolByName(self, \"portal_membership\")\n self.member = self.mtool.getMemberById(self.userid)\n\n form = self.request.form\n\n self.searchResults = []\n self.searchString = \"\"\n self.newSearch = False\n\n if form.get(\"form.submitted\", False):\n delete = form.get(\"delete\", [])\n if delete:\n for groupname in delete:\n self.gtool.removePrincipalFromGroup(\n self.userid, groupname, self.request\n )\n self.context.plone_utils.addPortalMessage(_(\"Changes made.\"))\n\n add = 
form.get(\"add\", [])\n if add:\n for groupname in add:\n group = self.gtool.getGroupById(groupname)\n if \"Manager\" in group.getRoles() and not self.is_zope_manager:\n raise Forbidden\n\n self.gtool.addPrincipalToGroup(self.userid, groupname, self.request)\n self.context.plone_utils.addPortalMessage(_(\"Changes made.\"))\n\n search = form.get(\"form.button.Search\", None) is not None\n findAll = (\n form.get(\"form.button.FindAll\", None) is not None and not self.many_groups\n )\n self.searchString = not findAll and form.get(\"searchstring\", \"\") or \"\"\n\n if findAll or not self.many_groups or self.searchString != \"\":\n self.searchResults = self.getPotentialGroups(self.searchString)\n\n if search or findAll:\n self.newSearch = True\n\n self.groups = self.getGroups()\n\n def __call__(self):\n self.update()\n return self.index()\n\n def getGroups(self):\n groupResults = [\n self.gtool.getGroupById(m)\n for m in self.gtool.getGroupsForPrincipal(self.member)\n ]\n groupResults.sort(\n key=lambda x: normalizeString(x.getGroupTitleOrName()) if x else ''\n )\n return [i for i in groupResults if i]\n\n def getPotentialGroups(self, searchString):\n ignoredGroups = [x.id for x in self.getGroups() if x is not None]\n return self.membershipSearch(\n searchString, searchUsers=False, ignore=ignoredGroups\n )\n", "path": "Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py"}]}
1,356
158
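The Plone record above reduces to a Python 3 sort-key pitfall: `x is not None and f(x)` yields the bool `False` for `None` entries, and `bool` no longer compares against `str`. A standalone reproduction, with plain strings standing in for Plone group objects:

```python
# Reproduction sketch; plain strings stand in for Plone group objects.
groups = ["Editors", None, "reviewers"]

try:
    # Old key: False for None, a str otherwise -> mixed key types under Python 3.
    sorted(groups, key=lambda x: x is not None and x.lower())
except TypeError as exc:
    print(exc)  # e.g. '<' not supported between instances of 'bool' and 'str'

# Fixed key always returns a str, so None sorts first and is dropped afterwards,
# mirroring the record's `[i for i in groupResults if i]` filter.
ordered = sorted(groups, key=lambda x: x.lower() if x else "")
print([g for g in ordered if g])  # ['Editors', 'reviewers']
```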
gh_patches_debug_649
rasdani/github-patches
git_diff
pex-tool__pex-1997
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Release 2.1.116 On the docket: + [x] The --resolve-local-platforms option does not work with --complete-platforms #1899 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pex/version.py` Content: ``` 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.115" 5 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.115" +__version__ = "2.1.116"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.115\"\n+__version__ = \"2.1.116\"\n", "issue": "Release 2.1.116\nOn the docket:\r\n+ [x] The --resolve-local-platforms option does not work with --complete-platforms #1899\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.115\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.116\"\n", "path": "pex/version.py"}]}
348
98
gh_patches_debug_14330
rasdani/github-patches
git_diff
pyca__cryptography-5825
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Version Scheme Change We've talked on and off for quite a few years about our versioning scheme for `cryptography`, but #5771 made it very clear that despite our [versioning documentation](https://cryptography.io/en/latest/api-stability.html#versioning) some users still assume it's [SemVer](https://semver.org) and are caught off guard by changes. I propose that we **switch to a [CalVer](https://calver.org) scheme** for the thirty fifth feature release (which we currently would call 3.5). This has the advantage of being unambiguously not semantic versioning but encoding some vaguely useful information (the year of release) in it. ### Alternate Choices **Stay the course** We've successfully used our versioning scheme for a bit over 7 years now and the probability of another monumental shift like this is low. There is, however, a constant (but low) background radiation of people who are tripped up by making assumptions about our versioning so I would argue against this choice. **SemVer** Switch to using an actual semantic versioning scheme. Without re-litigating years of conversations, I don't believe either @alex or myself are interested in this option. (See https://github.com/pyca/cryptography/issues/5801#issuecomment-776067787 for a bit of elaboration on our position) **Firefox/Chrome Versioning** (From @alex) "Its merits are that it's technically semver compatible, it's fairly predictable, and it communicates at least a little info on the relative age of the project. Its demerits are that it probably won't solve the real problem here, which was that folks were surprised a major change happened at all." --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cryptography/__about__.py` Content: ``` 1 # This file is dual licensed under the terms of the Apache License, Version 2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 3 # for complete details. 4 5 6 __all__ = [ 7 "__title__", 8 "__summary__", 9 "__uri__", 10 "__version__", 11 "__author__", 12 "__email__", 13 "__license__", 14 "__copyright__", 15 ] 16 17 __title__ = "cryptography" 18 __summary__ = ( 19 "cryptography is a package which provides cryptographic recipes" 20 " and primitives to Python developers." 21 ) 22 __uri__ = "https://github.com/pyca/cryptography" 23 24 __version__ = "3.5.dev1" 25 26 __author__ = "The Python Cryptographic Authority and individual contributors" 27 __email__ = "cryptography-dev@python.org" 28 29 __license__ = "BSD or Apache License, Version 2.0" 30 __copyright__ = "Copyright 2013-2021 {}".format(__author__) 31 ``` Path: `vectors/cryptography_vectors/__about__.py` Content: ``` 1 # This file is dual licensed under the terms of the Apache License, Version 2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 3 # for complete details. 4 5 __all__ = [ 6 "__title__", 7 "__summary__", 8 "__uri__", 9 "__version__", 10 "__author__", 11 "__email__", 12 "__license__", 13 "__copyright__", 14 ] 15 16 __title__ = "cryptography_vectors" 17 __summary__ = "Test vectors for the cryptography package." 
18 19 __uri__ = "https://github.com/pyca/cryptography" 20 21 __version__ = "3.5.dev1" 22 23 __author__ = "The Python Cryptographic Authority and individual contributors" 24 __email__ = "cryptography-dev@python.org" 25 26 __license__ = "BSD or Apache License, Version 2.0" 27 __copyright__ = "Copyright 2013-2021 %s" % __author__ 28 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cryptography/__about__.py b/src/cryptography/__about__.py --- a/src/cryptography/__about__.py +++ b/src/cryptography/__about__.py @@ -21,7 +21,7 @@ ) __uri__ = "https://github.com/pyca/cryptography" -__version__ = "3.5.dev1" +__version__ = "35.0.0.dev1" __author__ = "The Python Cryptographic Authority and individual contributors" __email__ = "cryptography-dev@python.org" diff --git a/vectors/cryptography_vectors/__about__.py b/vectors/cryptography_vectors/__about__.py --- a/vectors/cryptography_vectors/__about__.py +++ b/vectors/cryptography_vectors/__about__.py @@ -18,7 +18,7 @@ __uri__ = "https://github.com/pyca/cryptography" -__version__ = "3.5.dev1" +__version__ = "35.0.0.dev1" __author__ = "The Python Cryptographic Authority and individual contributors" __email__ = "cryptography-dev@python.org"
{"golden_diff": "diff --git a/src/cryptography/__about__.py b/src/cryptography/__about__.py\n--- a/src/cryptography/__about__.py\n+++ b/src/cryptography/__about__.py\n@@ -21,7 +21,7 @@\n )\n __uri__ = \"https://github.com/pyca/cryptography\"\n \n-__version__ = \"3.5.dev1\"\n+__version__ = \"35.0.0.dev1\"\n \n __author__ = \"The Python Cryptographic Authority and individual contributors\"\n __email__ = \"cryptography-dev@python.org\"\ndiff --git a/vectors/cryptography_vectors/__about__.py b/vectors/cryptography_vectors/__about__.py\n--- a/vectors/cryptography_vectors/__about__.py\n+++ b/vectors/cryptography_vectors/__about__.py\n@@ -18,7 +18,7 @@\n \n __uri__ = \"https://github.com/pyca/cryptography\"\n \n-__version__ = \"3.5.dev1\"\n+__version__ = \"35.0.0.dev1\"\n \n __author__ = \"The Python Cryptographic Authority and individual contributors\"\n __email__ = \"cryptography-dev@python.org\"\n", "issue": "Version Scheme Change\nWe've talked on and off for quite a few years about our versioning scheme for `cryptography`, but #5771 made it very clear that despite our [versioning documentation](https://cryptography.io/en/latest/api-stability.html#versioning) some users still assume it's [SemVer](https://semver.org) and are caught off guard by changes. I propose that we **switch to a [CalVer](https://calver.org) scheme** for the thirty fifth feature release (which we currently would call 3.5). This has the advantage of being unambiguously not semantic versioning but encoding some vaguely useful information (the year of release) in it.\r\n\r\n### Alternate Choices\r\n**Stay the course**\r\nWe've successfully used our versioning scheme for a bit over 7 years now and the probability of another monumental shift like this is low. There is, however, a constant (but low) background radiation of people who are tripped up by making assumptions about our versioning so I would argue against this choice.\r\n\r\n**SemVer**\r\nSwitch to using an actual semantic versioning scheme. Without re-litigating years of conversations, I don't believe either @alex or myself are interested in this option. (See https://github.com/pyca/cryptography/issues/5801#issuecomment-776067787 for a bit of elaboration on our position)\r\n\r\n**Firefox/Chrome Versioning**\r\n(From @alex)\r\n\"Its merits are that it's technically semver compatible, it's fairly predictable, and it communicates at least a little info on the relative age of the project.\r\n\r\nIts demerits are that it probably won't solve the real problem here, which was that folks were surprised a major change happened at all.\"\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\n\n__all__ = [\n \"__title__\",\n \"__summary__\",\n \"__uri__\",\n \"__version__\",\n \"__author__\",\n \"__email__\",\n \"__license__\",\n \"__copyright__\",\n]\n\n__title__ = \"cryptography\"\n__summary__ = (\n \"cryptography is a package which provides cryptographic recipes\"\n \" and primitives to Python developers.\"\n)\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"3.5.dev1\"\n\n__author__ = \"The Python Cryptographic Authority and individual contributors\"\n__email__ = \"cryptography-dev@python.org\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2021 {}\".format(__author__)\n", "path": "src/cryptography/__about__.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n__all__ = [\n \"__title__\",\n \"__summary__\",\n \"__uri__\",\n \"__version__\",\n \"__author__\",\n \"__email__\",\n \"__license__\",\n \"__copyright__\",\n]\n\n__title__ = \"cryptography_vectors\"\n__summary__ = \"Test vectors for the cryptography package.\"\n\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"3.5.dev1\"\n\n__author__ = \"The Python Cryptographic Authority and individual contributors\"\n__email__ = \"cryptography-dev@python.org\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2021 %s\" % __author__\n", "path": "vectors/cryptography_vectors/__about__.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\n__all__ = [\n \"__title__\",\n \"__summary__\",\n \"__uri__\",\n \"__version__\",\n \"__author__\",\n \"__email__\",\n \"__license__\",\n \"__copyright__\",\n]\n\n__title__ = \"cryptography\"\n__summary__ = (\n \"cryptography is a package which provides cryptographic recipes\"\n \" and primitives to Python developers.\"\n)\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"35.0.0.dev1\"\n\n__author__ = \"The Python Cryptographic Authority and individual contributors\"\n__email__ = \"cryptography-dev@python.org\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2021 {}\".format(__author__)\n", "path": "src/cryptography/__about__.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n__all__ = [\n \"__title__\",\n \"__summary__\",\n \"__uri__\",\n \"__version__\",\n \"__author__\",\n \"__email__\",\n \"__license__\",\n \"__copyright__\",\n]\n\n__title__ = \"cryptography_vectors\"\n__summary__ = \"Test vectors for the cryptography package.\"\n\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"35.0.0.dev1\"\n\n__author__ = \"The Python Cryptographic Authority and individual contributors\"\n__email__ = \"cryptography-dev@python.org\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2021 %s\" % __author__\n", "path": "vectors/cryptography_vectors/__about__.py"}]}
1,176
252
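One practical detail behind the version jump in the cryptography record (`3.5.dev1` to `35.0.0.dev1`): under PEP 440 the new number still orders after every release of the old scheme, so resolvers upgrade cleanly and the `.dev1` suffix stays a pre-release. A quick check with the `packaging` library; the `3.4.7` comparison value is illustrative:

```python
# Quick PEP 440 ordering check; version strings are illustrative.
from packaging.version import Version

old = Version("3.4.7")          # a release under the old scheme
new = Version("35.0.0.dev1")    # the dev version introduced by the diff
assert new > old                # 35 > 3, so upgrades resolve forward
print(new.is_prerelease)        # True: .dev1 keeps it off default pip installs
```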
gh_patches_debug_26390
rasdani/github-patches
git_diff
jupyterhub__jupyterhub-835
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove or edit unreachable code block @minrk It looks like `_check_user_model` has a few code lines that don't seem to be reachable. Should we remove the lines after the return statement? https://github.com/jupyterhub/jupyterhub/blob/master/jupyterhub/apihandlers/base.py#L144 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `jupyterhub/apihandlers/base.py` Content: ``` 1 """Base API handlers""" 2 # Copyright (c) Jupyter Development Team. 3 # Distributed under the terms of the Modified BSD License. 4 5 import json 6 7 from http.client import responses 8 9 from tornado import web 10 11 from ..handlers import BaseHandler 12 from ..utils import url_path_join 13 14 class APIHandler(BaseHandler): 15 16 def check_referer(self): 17 """Check Origin for cross-site API requests. 18 19 Copied from WebSocket with changes: 20 21 - allow unspecified host/referer (e.g. scripts) 22 """ 23 host = self.request.headers.get("Host") 24 referer = self.request.headers.get("Referer") 25 26 # If no header is provided, assume it comes from a script/curl. 27 # We are only concerned with cross-site browser stuff here. 28 if not host: 29 self.log.warning("Blocking API request with no host") 30 return False 31 if not referer: 32 self.log.warning("Blocking API request with no referer") 33 return False 34 35 host_path = url_path_join(host, self.hub.server.base_url) 36 referer_path = referer.split('://', 1)[-1] 37 if not (referer_path + '/').startswith(host_path): 38 self.log.warning("Blocking Cross Origin API request. Referer: %s, Host: %s", 39 referer, host_path) 40 return False 41 return True 42 43 def get_current_user_cookie(self): 44 """Override get_user_cookie to check Referer header""" 45 cookie_user = super().get_current_user_cookie() 46 # check referer only if there is a cookie user, 47 # avoiding misleading "Blocking Cross Origin" messages 48 # when there's no cookie set anyway. 
49 if cookie_user and not self.check_referer(): 50 return None 51 return cookie_user 52 53 def get_json_body(self): 54 """Return the body of the request as JSON data.""" 55 if not self.request.body: 56 return None 57 body = self.request.body.strip().decode('utf-8') 58 try: 59 model = json.loads(body) 60 except Exception: 61 self.log.debug("Bad JSON: %r", body) 62 self.log.error("Couldn't parse JSON", exc_info=True) 63 raise web.HTTPError(400, 'Invalid JSON in body of request') 64 return model 65 66 def write_error(self, status_code, **kwargs): 67 """Write JSON errors instead of HTML""" 68 exc_info = kwargs.get('exc_info') 69 message = '' 70 status_message = responses.get(status_code, 'Unknown Error') 71 if exc_info: 72 exception = exc_info[1] 73 # get the custom message, if defined 74 try: 75 message = exception.log_message % exception.args 76 except Exception: 77 pass 78 79 # construct the custom reason, if defined 80 reason = getattr(exception, 'reason', '') 81 if reason: 82 status_message = reason 83 self.set_header('Content-Type', 'application/json') 84 self.write(json.dumps({ 85 'status': status_code, 86 'message': message or status_message, 87 })) 88 89 def user_model(self, user): 90 """Get the JSON model for a User object""" 91 model = { 92 'name': user.name, 93 'admin': user.admin, 94 'groups': [ g.name for g in user.groups ], 95 'server': user.url if user.running else None, 96 'pending': None, 97 'last_activity': user.last_activity.isoformat(), 98 } 99 if user.spawn_pending: 100 model['pending'] = 'spawn' 101 elif user.stop_pending: 102 model['pending'] = 'stop' 103 return model 104 105 def group_model(self, group): 106 """Get the JSON model for a Group object""" 107 return { 108 'name': group.name, 109 'users': [ u.name for u in group.users ] 110 } 111 112 _user_model_types = { 113 'name': str, 114 'admin': bool, 115 'groups': list, 116 } 117 118 _group_model_types = { 119 'name': str, 120 'users': list, 121 } 122 123 def _check_model(self, model, model_types, name): 124 """Check a model provided by a REST API request 125 126 Args: 127 model (dict): user-provided model 128 model_types (dict): dict of key:type used to validate types and keys 129 name (str): name of the model, used in error messages 130 """ 131 if not isinstance(model, dict): 132 raise web.HTTPError(400, "Invalid JSON data: %r" % model) 133 if not set(model).issubset(set(model_types)): 134 raise web.HTTPError(400, "Invalid JSON keys: %r" % model) 135 for key, value in model.items(): 136 if not isinstance(value, model_types[key]): 137 raise web.HTTPError(400, "%s.%s must be %s, not: %r" % ( 138 name, key, model_types[key], type(value) 139 )) 140 141 def _check_user_model(self, model): 142 """Check a request-provided user model from a REST API""" 143 return self._check_model(model, self._user_model_types, 'user') 144 for groupname in model.get('groups', []): 145 if not isinstance(groupname, str): 146 raise web.HTTPError(400, "group names must be str, not %r" % type(groupname)) 147 148 def _check_group_model(self, model): 149 """Check a request-provided user model from a REST API""" 150 self._check_model(model, self._group_model_types, 'group') 151 for username in model.get('users', []): 152 if not isinstance(username, str): 153 raise web.HTTPError(400, "usernames must be str, not %r" % type(groupname)) 154 155 def options(self, *args, **kwargs): 156 self.set_header('Access-Control-Allow-Headers', 'accept, content-type') 157 self.finish() 158 ``` --- END FILES --- Please first localize the bug based on the issue 
statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/jupyterhub/apihandlers/base.py b/jupyterhub/apihandlers/base.py --- a/jupyterhub/apihandlers/base.py +++ b/jupyterhub/apihandlers/base.py @@ -140,19 +140,18 @@ def _check_user_model(self, model): """Check a request-provided user model from a REST API""" - return self._check_model(model, self._user_model_types, 'user') - for groupname in model.get('groups', []): - if not isinstance(groupname, str): - raise web.HTTPError(400, "group names must be str, not %r" % type(groupname)) + self._check_model(model, self._user_model_types, 'user') + for username in model.get('users', []): + if not isinstance(username, str): + raise web.HTTPError(400, ("usernames must be str, not %r", type(username))) def _check_group_model(self, model): - """Check a request-provided user model from a REST API""" + """Check a request-provided group model from a REST API""" self._check_model(model, self._group_model_types, 'group') - for username in model.get('users', []): - if not isinstance(username, str): - raise web.HTTPError(400, "usernames must be str, not %r" % type(groupname)) + for groupname in model.get('groups', []): + if not isinstance(groupname, str): + raise web.HTTPError(400, ("group names must be str, not %r", type(groupname))) def options(self, *args, **kwargs): self.set_header('Access-Control-Allow-Headers', 'accept, content-type') self.finish() - \ No newline at end of file
{"golden_diff": "diff --git a/jupyterhub/apihandlers/base.py b/jupyterhub/apihandlers/base.py\n--- a/jupyterhub/apihandlers/base.py\n+++ b/jupyterhub/apihandlers/base.py\n@@ -140,19 +140,18 @@\n \n def _check_user_model(self, model):\n \"\"\"Check a request-provided user model from a REST API\"\"\"\n- return self._check_model(model, self._user_model_types, 'user')\n- for groupname in model.get('groups', []):\n- if not isinstance(groupname, str):\n- raise web.HTTPError(400, \"group names must be str, not %r\" % type(groupname))\n+ self._check_model(model, self._user_model_types, 'user')\n+ for username in model.get('users', []):\n+ if not isinstance(username, str):\n+ raise web.HTTPError(400, (\"usernames must be str, not %r\", type(username)))\n \n def _check_group_model(self, model):\n- \"\"\"Check a request-provided user model from a REST API\"\"\"\n+ \"\"\"Check a request-provided group model from a REST API\"\"\"\n self._check_model(model, self._group_model_types, 'group')\n- for username in model.get('users', []):\n- if not isinstance(username, str):\n- raise web.HTTPError(400, \"usernames must be str, not %r\" % type(groupname))\n+ for groupname in model.get('groups', []):\n+ if not isinstance(groupname, str):\n+ raise web.HTTPError(400, (\"group names must be str, not %r\", type(groupname)))\n \n def options(self, *args, **kwargs):\n self.set_header('Access-Control-Allow-Headers', 'accept, content-type')\n self.finish()\n- \n\\ No newline at end of file\n", "issue": "Remove or edit unreachable code block\n@minrk It looks like `_check_user_model` has a few code lines that don't seem to be reachable. Should we remove the lines after the return statement? https://github.com/jupyterhub/jupyterhub/blob/master/jupyterhub/apihandlers/base.py#L144\n\n", "before_files": [{"content": "\"\"\"Base API handlers\"\"\"\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport json\n\nfrom http.client import responses\n\nfrom tornado import web\n\nfrom ..handlers import BaseHandler\nfrom ..utils import url_path_join\n\nclass APIHandler(BaseHandler):\n\n def check_referer(self):\n \"\"\"Check Origin for cross-site API requests.\n \n Copied from WebSocket with changes:\n \n - allow unspecified host/referer (e.g. scripts)\n \"\"\"\n host = self.request.headers.get(\"Host\")\n referer = self.request.headers.get(\"Referer\")\n\n # If no header is provided, assume it comes from a script/curl.\n # We are only concerned with cross-site browser stuff here.\n if not host:\n self.log.warning(\"Blocking API request with no host\")\n return False\n if not referer:\n self.log.warning(\"Blocking API request with no referer\")\n return False\n \n host_path = url_path_join(host, self.hub.server.base_url)\n referer_path = referer.split('://', 1)[-1]\n if not (referer_path + '/').startswith(host_path):\n self.log.warning(\"Blocking Cross Origin API request. 
Referer: %s, Host: %s\",\n referer, host_path)\n return False\n return True\n \n def get_current_user_cookie(self):\n \"\"\"Override get_user_cookie to check Referer header\"\"\"\n cookie_user = super().get_current_user_cookie()\n # check referer only if there is a cookie user,\n # avoiding misleading \"Blocking Cross Origin\" messages\n # when there's no cookie set anyway.\n if cookie_user and not self.check_referer():\n return None\n return cookie_user\n\n def get_json_body(self):\n \"\"\"Return the body of the request as JSON data.\"\"\"\n if not self.request.body:\n return None\n body = self.request.body.strip().decode('utf-8')\n try:\n model = json.loads(body)\n except Exception:\n self.log.debug(\"Bad JSON: %r\", body)\n self.log.error(\"Couldn't parse JSON\", exc_info=True)\n raise web.HTTPError(400, 'Invalid JSON in body of request')\n return model\n \n def write_error(self, status_code, **kwargs):\n \"\"\"Write JSON errors instead of HTML\"\"\"\n exc_info = kwargs.get('exc_info')\n message = ''\n status_message = responses.get(status_code, 'Unknown Error')\n if exc_info:\n exception = exc_info[1]\n # get the custom message, if defined\n try:\n message = exception.log_message % exception.args\n except Exception:\n pass\n\n # construct the custom reason, if defined\n reason = getattr(exception, 'reason', '')\n if reason:\n status_message = reason\n self.set_header('Content-Type', 'application/json')\n self.write(json.dumps({\n 'status': status_code,\n 'message': message or status_message,\n }))\n\n def user_model(self, user):\n \"\"\"Get the JSON model for a User object\"\"\"\n model = {\n 'name': user.name,\n 'admin': user.admin,\n 'groups': [ g.name for g in user.groups ],\n 'server': user.url if user.running else None,\n 'pending': None,\n 'last_activity': user.last_activity.isoformat(),\n }\n if user.spawn_pending:\n model['pending'] = 'spawn'\n elif user.stop_pending:\n model['pending'] = 'stop'\n return model\n\n def group_model(self, group):\n \"\"\"Get the JSON model for a Group object\"\"\"\n return {\n 'name': group.name,\n 'users': [ u.name for u in group.users ]\n }\n\n _user_model_types = {\n 'name': str,\n 'admin': bool,\n 'groups': list,\n }\n\n _group_model_types = {\n 'name': str,\n 'users': list,\n }\n\n def _check_model(self, model, model_types, name):\n \"\"\"Check a model provided by a REST API request\n \n Args:\n model (dict): user-provided model\n model_types (dict): dict of key:type used to validate types and keys\n name (str): name of the model, used in error messages\n \"\"\"\n if not isinstance(model, dict):\n raise web.HTTPError(400, \"Invalid JSON data: %r\" % model)\n if not set(model).issubset(set(model_types)):\n raise web.HTTPError(400, \"Invalid JSON keys: %r\" % model)\n for key, value in model.items():\n if not isinstance(value, model_types[key]):\n raise web.HTTPError(400, \"%s.%s must be %s, not: %r\" % (\n name, key, model_types[key], type(value)\n ))\n\n def _check_user_model(self, model):\n \"\"\"Check a request-provided user model from a REST API\"\"\"\n return self._check_model(model, self._user_model_types, 'user')\n for groupname in model.get('groups', []):\n if not isinstance(groupname, str):\n raise web.HTTPError(400, \"group names must be str, not %r\" % type(groupname))\n\n def _check_group_model(self, model):\n \"\"\"Check a request-provided user model from a REST API\"\"\"\n self._check_model(model, self._group_model_types, 'group')\n for username in model.get('users', []):\n if not isinstance(username, str):\n raise 
web.HTTPError(400, \"usernames must be str, not %r\" % type(groupname))\n\n def options(self, *args, **kwargs):\n self.set_header('Access-Control-Allow-Headers', 'accept, content-type')\n self.finish()\n ", "path": "jupyterhub/apihandlers/base.py"}], "after_files": [{"content": "\"\"\"Base API handlers\"\"\"\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport json\n\nfrom http.client import responses\n\nfrom tornado import web\n\nfrom ..handlers import BaseHandler\nfrom ..utils import url_path_join\n\nclass APIHandler(BaseHandler):\n\n def check_referer(self):\n \"\"\"Check Origin for cross-site API requests.\n \n Copied from WebSocket with changes:\n \n - allow unspecified host/referer (e.g. scripts)\n \"\"\"\n host = self.request.headers.get(\"Host\")\n referer = self.request.headers.get(\"Referer\")\n\n # If no header is provided, assume it comes from a script/curl.\n # We are only concerned with cross-site browser stuff here.\n if not host:\n self.log.warning(\"Blocking API request with no host\")\n return False\n if not referer:\n self.log.warning(\"Blocking API request with no referer\")\n return False\n \n host_path = url_path_join(host, self.hub.server.base_url)\n referer_path = referer.split('://', 1)[-1]\n if not (referer_path + '/').startswith(host_path):\n self.log.warning(\"Blocking Cross Origin API request. Referer: %s, Host: %s\",\n referer, host_path)\n return False\n return True\n \n def get_current_user_cookie(self):\n \"\"\"Override get_user_cookie to check Referer header\"\"\"\n cookie_user = super().get_current_user_cookie()\n # check referer only if there is a cookie user,\n # avoiding misleading \"Blocking Cross Origin\" messages\n # when there's no cookie set anyway.\n if cookie_user and not self.check_referer():\n return None\n return cookie_user\n\n def get_json_body(self):\n \"\"\"Return the body of the request as JSON data.\"\"\"\n if not self.request.body:\n return None\n body = self.request.body.strip().decode('utf-8')\n try:\n model = json.loads(body)\n except Exception:\n self.log.debug(\"Bad JSON: %r\", body)\n self.log.error(\"Couldn't parse JSON\", exc_info=True)\n raise web.HTTPError(400, 'Invalid JSON in body of request')\n return model\n \n def write_error(self, status_code, **kwargs):\n \"\"\"Write JSON errors instead of HTML\"\"\"\n exc_info = kwargs.get('exc_info')\n message = ''\n status_message = responses.get(status_code, 'Unknown Error')\n if exc_info:\n exception = exc_info[1]\n # get the custom message, if defined\n try:\n message = exception.log_message % exception.args\n except Exception:\n pass\n\n # construct the custom reason, if defined\n reason = getattr(exception, 'reason', '')\n if reason:\n status_message = reason\n self.set_header('Content-Type', 'application/json')\n self.write(json.dumps({\n 'status': status_code,\n 'message': message or status_message,\n }))\n\n def user_model(self, user):\n \"\"\"Get the JSON model for a User object\"\"\"\n model = {\n 'name': user.name,\n 'admin': user.admin,\n 'groups': [ g.name for g in user.groups ],\n 'server': user.url if user.running else None,\n 'pending': None,\n 'last_activity': user.last_activity.isoformat(),\n }\n if user.spawn_pending:\n model['pending'] = 'spawn'\n elif user.stop_pending:\n model['pending'] = 'stop'\n return model\n\n def group_model(self, group):\n \"\"\"Get the JSON model for a Group object\"\"\"\n return {\n 'name': group.name,\n 'users': [ u.name for u in group.users ]\n }\n\n _user_model_types = {\n 
'name': str,\n 'admin': bool,\n 'groups': list,\n }\n\n _group_model_types = {\n 'name': str,\n 'users': list,\n }\n\n def _check_model(self, model, model_types, name):\n \"\"\"Check a model provided by a REST API request\n \n Args:\n model (dict): user-provided model\n model_types (dict): dict of key:type used to validate types and keys\n name (str): name of the model, used in error messages\n \"\"\"\n if not isinstance(model, dict):\n raise web.HTTPError(400, \"Invalid JSON data: %r\" % model)\n if not set(model).issubset(set(model_types)):\n raise web.HTTPError(400, \"Invalid JSON keys: %r\" % model)\n for key, value in model.items():\n if not isinstance(value, model_types[key]):\n raise web.HTTPError(400, \"%s.%s must be %s, not: %r\" % (\n name, key, model_types[key], type(value)\n ))\n\n def _check_user_model(self, model):\n \"\"\"Check a request-provided user model from a REST API\"\"\"\n self._check_model(model, self._user_model_types, 'user')\n for username in model.get('users', []):\n if not isinstance(username, str):\n raise web.HTTPError(400, (\"usernames must be str, not %r\", type(username)))\n\n def _check_group_model(self, model):\n \"\"\"Check a request-provided group model from a REST API\"\"\"\n self._check_model(model, self._group_model_types, 'group')\n for groupname in model.get('groups', []):\n if not isinstance(groupname, str):\n raise web.HTTPError(400, (\"group names must be str, not %r\", type(groupname)))\n\n def options(self, *args, **kwargs):\n self.set_header('Access-Control-Allow-Headers', 'accept, content-type')\n self.finish()\n", "path": "jupyterhub/apihandlers/base.py"}]}
1,952
408
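The JupyterHub record is the classic dead-code-after-`return` pattern: the per-item type check after the `return` could never execute, and the group validator referenced the wrong variable. A self-contained sketch of the pattern itself (simplified; the actual golden diff also swaps which key each validator checks, and raises tornado's `web.HTTPError` rather than these hypothetical `TypeError`s):

```python
# Hypothetical stand-ins for the upstream validators.
def check_user_model_buggy(model):
    return isinstance(model, dict)           # returns here, so...
    for name in model.get("groups", []):     # ...this loop is unreachable
        if not isinstance(name, str):
            raise TypeError("group names must be str, not %r" % type(name))

def check_user_model_fixed(model):
    if not isinstance(model, dict):
        raise TypeError("model must be a dict")
    for name in model.get("groups", []):     # reachable once the early return is gone
        if not isinstance(name, str):
            raise TypeError("group names must be str, not %r" % type(name))

print(check_user_model_buggy({"groups": ["admins", 42]}))  # True -- 42 slips through
try:
    check_user_model_fixed({"groups": ["admins", 42]})
except TypeError as exc:
    print(exc)  # group names must be str, not <class 'int'>
```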
gh_patches_debug_25209
rasdani/github-patches
git_diff
urllib3__urllib3-516
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support body param in RequestMethods.request Easiest way to do this is by avoiding defining a body kw when no fields are given, then if both are given it will naturally raise a "passed twice" error. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `urllib3/request.py` Content: ``` 1 try: 2 from urllib.parse import urlencode 3 except ImportError: 4 from urllib import urlencode 5 6 from .filepost import encode_multipart_formdata 7 8 9 __all__ = ['RequestMethods'] 10 11 12 class RequestMethods(object): 13 """ 14 Convenience mixin for classes who implement a :meth:`urlopen` method, such 15 as :class:`~urllib3.connectionpool.HTTPConnectionPool` and 16 :class:`~urllib3.poolmanager.PoolManager`. 17 18 Provides behavior for making common types of HTTP request methods and 19 decides which type of request field encoding to use. 20 21 Specifically, 22 23 :meth:`.request_encode_url` is for sending requests whose fields are 24 encoded in the URL (such as GET, HEAD, DELETE). 25 26 :meth:`.request_encode_body` is for sending requests whose fields are 27 encoded in the *body* of the request using multipart or www-form-urlencoded 28 (such as for POST, PUT, PATCH). 29 30 :meth:`.request` is for making any kind of request, it will look up the 31 appropriate encoding format and use one of the above two methods to make 32 the request. 33 34 Initializer parameters: 35 36 :param headers: 37 Headers to include with all requests, unless other headers are given 38 explicitly. 39 """ 40 41 _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS']) 42 43 def __init__(self, headers=None): 44 self.headers = headers or {} 45 46 def urlopen(self, method, url, body=None, headers=None, 47 encode_multipart=True, multipart_boundary=None, 48 **kw): # Abstract 49 raise NotImplemented("Classes extending RequestMethods must implement " 50 "their own ``urlopen`` method.") 51 52 def request(self, method, url, fields=None, headers=None, **urlopen_kw): 53 """ 54 Make a request using :meth:`urlopen` with the appropriate encoding of 55 ``fields`` based on the ``method`` used. 56 57 This is a convenience method that requires the least amount of manual 58 effort. It can be used in most situations, while still having the 59 option to drop down to more specific methods when necessary, such as 60 :meth:`request_encode_url`, :meth:`request_encode_body`, 61 or even the lowest level :meth:`urlopen`. 62 """ 63 method = method.upper() 64 65 if method in self._encode_url_methods: 66 return self.request_encode_url(method, url, fields=fields, 67 headers=headers, 68 **urlopen_kw) 69 else: 70 return self.request_encode_body(method, url, fields=fields, 71 headers=headers, 72 **urlopen_kw) 73 74 def request_encode_url(self, method, url, fields=None, **urlopen_kw): 75 """ 76 Make a request using :meth:`urlopen` with the ``fields`` encoded in 77 the url. This is useful for request methods like GET, HEAD, DELETE, etc. 78 """ 79 if fields: 80 url += '?' + urlencode(fields) 81 return self.urlopen(method, url, **urlopen_kw) 82 83 def request_encode_body(self, method, url, fields=None, headers=None, 84 encode_multipart=True, multipart_boundary=None, 85 **urlopen_kw): 86 """ 87 Make a request using :meth:`urlopen` with the ``fields`` encoded in 88 the body. This is useful for request methods like POST, PUT, PATCH, etc. 
89 90 When ``encode_multipart=True`` (default), then 91 :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode 92 the payload with the appropriate content type. Otherwise 93 :meth:`urllib.urlencode` is used with the 94 'application/x-www-form-urlencoded' content type. 95 96 Multipart encoding must be used when posting files, and it's reasonably 97 safe to use it in other times too. However, it may break request 98 signing, such as with OAuth. 99 100 Supports an optional ``fields`` parameter of key/value strings AND 101 key/filetuple. A filetuple is a (filename, data, MIME type) tuple where 102 the MIME type is optional. For example:: 103 104 fields = { 105 'foo': 'bar', 106 'fakefile': ('foofile.txt', 'contents of foofile'), 107 'realfile': ('barfile.txt', open('realfile').read()), 108 'typedfile': ('bazfile.bin', open('bazfile').read(), 109 'image/jpeg'), 110 'nonamefile': 'contents of nonamefile field', 111 } 112 113 When uploading a file, providing a filename (the first parameter of the 114 tuple) is optional but recommended to best mimick behavior of browsers. 115 116 Note that if ``headers`` are supplied, the 'Content-Type' header will 117 be overwritten because it depends on the dynamic random boundary string 118 which is used to compose the body of the request. The random boundary 119 string can be explicitly set with the ``multipart_boundary`` parameter. 120 """ 121 if encode_multipart: 122 body, content_type = encode_multipart_formdata( 123 fields or {}, boundary=multipart_boundary) 124 else: 125 body, content_type = (urlencode(fields or {}), 126 'application/x-www-form-urlencoded') 127 128 if headers is None: 129 headers = self.headers 130 131 headers_ = {'Content-Type': content_type} 132 headers_.update(headers) 133 134 return self.urlopen(method, url, body=body, headers=headers_, 135 **urlopen_kw) 136 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/urllib3/request.py b/urllib3/request.py --- a/urllib3/request.py +++ b/urllib3/request.py @@ -118,18 +118,24 @@ which is used to compose the body of the request. The random boundary string can be explicitly set with the ``multipart_boundary`` parameter. """ - if encode_multipart: - body, content_type = encode_multipart_formdata( - fields or {}, boundary=multipart_boundary) - else: - body, content_type = (urlencode(fields or {}), - 'application/x-www-form-urlencoded') - if headers is None: headers = self.headers - headers_ = {'Content-Type': content_type} - headers_.update(headers) + extra_kw = {'headers': {}} + + if fields: + if 'body' in urlopen_kw: + raise TypeError('request got values for both \'fields\' and \'body\', can only specify one.') + + if encode_multipart: + body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary) + else: + body, content_type = urlencode(fields), 'application/x-www-form-urlencoded' + + extra_kw['body'] = body + extra_kw['headers'] = {'Content-Type': content_type} + + extra_kw['headers'].update(headers) + extra_kw.update(urlopen_kw) - return self.urlopen(method, url, body=body, headers=headers_, - **urlopen_kw) + return self.urlopen(method, url, **extra_kw)
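The diff above implements what the issue sketches: only define a `body` keyword when `fields` are given, and raise explicitly when both arrive. A condensed version of the pattern; `fake_urlopen` and the join-based encoder are placeholders for the real pool's `urlopen` and `urlencode`/`encode_multipart_formdata`:

```python
# Condensed sketch of the fields-vs-body mutual exclusion from the patch.
def fake_urlopen(method, url, body=None, headers=None):
    return method, url, body, headers

def request(method, url, fields=None, headers=None, **urlopen_kw):
    extra_kw = {"headers": headers or {}}
    if fields:
        if "body" in urlopen_kw:
            raise TypeError(
                "request got values for both 'fields' and 'body', "
                "can only specify one."
            )
        # placeholder for urlencode / encode_multipart_formdata
        extra_kw["body"] = "&".join("%s=%s" % kv for kv in sorted(fields.items()))
    extra_kw.update(urlopen_kw)
    return fake_urlopen(method, url, **extra_kw)

print(request("POST", "/submit", fields={"a": "1", "b": "2"}))  # encodes fields
print(request("POST", "/submit", body=b"raw payload"))          # body passes through
# request("POST", "/submit", fields={"a": "1"}, body=b"x")      # -> TypeError
```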
{"golden_diff": "diff --git a/urllib3/request.py b/urllib3/request.py\n--- a/urllib3/request.py\n+++ b/urllib3/request.py\n@@ -118,18 +118,24 @@\n which is used to compose the body of the request. The random boundary\n string can be explicitly set with the ``multipart_boundary`` parameter.\n \"\"\"\n- if encode_multipart:\n- body, content_type = encode_multipart_formdata(\n- fields or {}, boundary=multipart_boundary)\n- else:\n- body, content_type = (urlencode(fields or {}),\n- 'application/x-www-form-urlencoded')\n-\n if headers is None:\n headers = self.headers\n \n- headers_ = {'Content-Type': content_type}\n- headers_.update(headers)\n+ extra_kw = {'headers': {}}\n+\n+ if fields:\n+ if 'body' in urlopen_kw:\n+ raise TypeError('request got values for both \\'fields\\' and \\'body\\', can only specify one.')\n+\n+ if encode_multipart:\n+ body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)\n+ else:\n+ body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'\n+\n+ extra_kw['body'] = body\n+ extra_kw['headers'] = {'Content-Type': content_type}\n+\n+ extra_kw['headers'].update(headers)\n+ extra_kw.update(urlopen_kw)\n \n- return self.urlopen(method, url, body=body, headers=headers_,\n- **urlopen_kw)\n+ return self.urlopen(method, url, **extra_kw)\n", "issue": "Support body param in RequestMethods.request\nEasiest way to do this is by avoiding defining a body kw when no fields are given, then if both are given it will naturally raise a \"passed twice\" error.\n\n", "before_files": [{"content": "try:\n from urllib.parse import urlencode\nexcept ImportError:\n from urllib import urlencode\n\nfrom .filepost import encode_multipart_formdata\n\n\n__all__ = ['RequestMethods']\n\n\nclass RequestMethods(object):\n \"\"\"\n Convenience mixin for classes who implement a :meth:`urlopen` method, such\n as :class:`~urllib3.connectionpool.HTTPConnectionPool` and\n :class:`~urllib3.poolmanager.PoolManager`.\n\n Provides behavior for making common types of HTTP request methods and\n decides which type of request field encoding to use.\n\n Specifically,\n\n :meth:`.request_encode_url` is for sending requests whose fields are\n encoded in the URL (such as GET, HEAD, DELETE).\n\n :meth:`.request_encode_body` is for sending requests whose fields are\n encoded in the *body* of the request using multipart or www-form-urlencoded\n (such as for POST, PUT, PATCH).\n\n :meth:`.request` is for making any kind of request, it will look up the\n appropriate encoding format and use one of the above two methods to make\n the request.\n\n Initializer parameters:\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n \"\"\"\n\n _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])\n\n def __init__(self, headers=None):\n self.headers = headers or {}\n\n def urlopen(self, method, url, body=None, headers=None,\n encode_multipart=True, multipart_boundary=None,\n **kw): # Abstract\n raise NotImplemented(\"Classes extending RequestMethods must implement \"\n \"their own ``urlopen`` method.\")\n\n def request(self, method, url, fields=None, headers=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the appropriate encoding of\n ``fields`` based on the ``method`` used.\n\n This is a convenience method that requires the least amount of manual\n effort. 
It can be used in most situations, while still having the\n option to drop down to more specific methods when necessary, such as\n :meth:`request_encode_url`, :meth:`request_encode_body`,\n or even the lowest level :meth:`urlopen`.\n \"\"\"\n method = method.upper()\n\n if method in self._encode_url_methods:\n return self.request_encode_url(method, url, fields=fields,\n headers=headers,\n **urlopen_kw)\n else:\n return self.request_encode_body(method, url, fields=fields,\n headers=headers,\n **urlopen_kw)\n\n def request_encode_url(self, method, url, fields=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the url. This is useful for request methods like GET, HEAD, DELETE, etc.\n \"\"\"\n if fields:\n url += '?' + urlencode(fields)\n return self.urlopen(method, url, **urlopen_kw)\n\n def request_encode_body(self, method, url, fields=None, headers=None,\n encode_multipart=True, multipart_boundary=None,\n **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the body. This is useful for request methods like POST, PUT, PATCH, etc.\n\n When ``encode_multipart=True`` (default), then\n :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode\n the payload with the appropriate content type. Otherwise\n :meth:`urllib.urlencode` is used with the\n 'application/x-www-form-urlencoded' content type.\n\n Multipart encoding must be used when posting files, and it's reasonably\n safe to use it in other times too. However, it may break request\n signing, such as with OAuth.\n\n Supports an optional ``fields`` parameter of key/value strings AND\n key/filetuple. A filetuple is a (filename, data, MIME type) tuple where\n the MIME type is optional. For example::\n\n fields = {\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(),\n 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n }\n\n When uploading a file, providing a filename (the first parameter of the\n tuple) is optional but recommended to best mimick behavior of browsers.\n\n Note that if ``headers`` are supplied, the 'Content-Type' header will\n be overwritten because it depends on the dynamic random boundary string\n which is used to compose the body of the request. 
The random boundary\n string can be explicitly set with the ``multipart_boundary`` parameter.\n \"\"\"\n if encode_multipart:\n body, content_type = encode_multipart_formdata(\n fields or {}, boundary=multipart_boundary)\n else:\n body, content_type = (urlencode(fields or {}),\n 'application/x-www-form-urlencoded')\n\n if headers is None:\n headers = self.headers\n\n headers_ = {'Content-Type': content_type}\n headers_.update(headers)\n\n return self.urlopen(method, url, body=body, headers=headers_,\n **urlopen_kw)\n", "path": "urllib3/request.py"}], "after_files": [{"content": "try:\n from urllib.parse import urlencode\nexcept ImportError:\n from urllib import urlencode\n\nfrom .filepost import encode_multipart_formdata\n\n\n__all__ = ['RequestMethods']\n\n\nclass RequestMethods(object):\n \"\"\"\n Convenience mixin for classes who implement a :meth:`urlopen` method, such\n as :class:`~urllib3.connectionpool.HTTPConnectionPool` and\n :class:`~urllib3.poolmanager.PoolManager`.\n\n Provides behavior for making common types of HTTP request methods and\n decides which type of request field encoding to use.\n\n Specifically,\n\n :meth:`.request_encode_url` is for sending requests whose fields are\n encoded in the URL (such as GET, HEAD, DELETE).\n\n :meth:`.request_encode_body` is for sending requests whose fields are\n encoded in the *body* of the request using multipart or www-form-urlencoded\n (such as for POST, PUT, PATCH).\n\n :meth:`.request` is for making any kind of request, it will look up the\n appropriate encoding format and use one of the above two methods to make\n the request.\n\n Initializer parameters:\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n \"\"\"\n\n _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])\n\n def __init__(self, headers=None):\n self.headers = headers or {}\n\n def urlopen(self, method, url, body=None, headers=None,\n encode_multipart=True, multipart_boundary=None,\n **kw): # Abstract\n raise NotImplemented(\"Classes extending RequestMethods must implement \"\n \"their own ``urlopen`` method.\")\n\n def request(self, method, url, fields=None, headers=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the appropriate encoding of\n ``fields`` based on the ``method`` used.\n\n This is a convenience method that requires the least amount of manual\n effort. It can be used in most situations, while still having the\n option to drop down to more specific methods when necessary, such as\n :meth:`request_encode_url`, :meth:`request_encode_body`,\n or even the lowest level :meth:`urlopen`.\n \"\"\"\n method = method.upper()\n\n if method in self._encode_url_methods:\n return self.request_encode_url(method, url, fields=fields,\n headers=headers,\n **urlopen_kw)\n else:\n return self.request_encode_body(method, url, fields=fields,\n headers=headers,\n **urlopen_kw)\n\n def request_encode_url(self, method, url, fields=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the url. This is useful for request methods like GET, HEAD, DELETE, etc.\n \"\"\"\n if fields:\n url += '?' + urlencode(fields)\n return self.urlopen(method, url, **urlopen_kw)\n\n def request_encode_body(self, method, url, fields=None, headers=None,\n encode_multipart=True, multipart_boundary=None,\n **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the body. 
This is useful for request methods like POST, PUT, PATCH, etc.\n\n When ``encode_multipart=True`` (default), then\n :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode\n the payload with the appropriate content type. Otherwise\n :meth:`urllib.urlencode` is used with the\n 'application/x-www-form-urlencoded' content type.\n\n Multipart encoding must be used when posting files, and it's reasonably\n safe to use it in other times too. However, it may break request\n signing, such as with OAuth.\n\n Supports an optional ``fields`` parameter of key/value strings AND\n key/filetuple. A filetuple is a (filename, data, MIME type) tuple where\n the MIME type is optional. For example::\n\n fields = {\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(),\n 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n }\n\n When uploading a file, providing a filename (the first parameter of the\n tuple) is optional but recommended to best mimick behavior of browsers.\n\n Note that if ``headers`` are supplied, the 'Content-Type' header will\n be overwritten because it depends on the dynamic random boundary string\n which is used to compose the body of the request. The random boundary\n string can be explicitly set with the ``multipart_boundary`` parameter.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw = {'headers': {}}\n\n if fields:\n if 'body' in urlopen_kw:\n raise TypeError('request got values for both \\'fields\\' and \\'body\\', can only specify one.')\n\n if encode_multipart:\n body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)\n else:\n body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'\n\n extra_kw['body'] = body\n extra_kw['headers'] = {'Content-Type': content_type}\n\n extra_kw['headers'].update(headers)\n extra_kw.update(urlopen_kw)\n\n return self.urlopen(method, url, **extra_kw)\n", "path": "urllib3/request.py"}]}
1766
351
gh_patches_debug_23722
rasdani/github-patches
git_diff
intel__dffml-585
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- docs: source: New File source tutorial The question of how to make a new data source came up [recently](https://gitter.im/dffml/community?at=5e0e4bfbfd580457e7b3e26a). We should have multiple tutorials for sources. We're going to use https://github.com/intel/dffml/issues/551 as the example for this tutorial, so that issue needs to be closed first. You'll want to make a `docs/tutorials/sources/index.rst` Then take this exist tutorial and move it under `docs/tutoirials/sources/complex.rst` You're new one will go under `sources/file.rst` Te idea here is just to explain how you can write a source which reads data from some new filetype, with our example being the `.ini` Try to follow the new model tutorial format, where we do `literalinclude` and `:lines:` to talk about individual sections of the files we're writing. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dffml/source/ini.py` Content: ``` 1 from configparser import ConfigParser 2 3 from ..base import config 4 from ..record import Record 5 from .file import FileSource 6 from .memory import MemorySource 7 from ..util.data import parser_helper 8 from ..util.entrypoint import entrypoint 9 10 11 @config 12 class INISourceConfig: 13 filename: str 14 readwrite: bool = False 15 allowempty: bool = False 16 17 18 @entrypoint("ini") 19 class INISource(FileSource, MemorySource): 20 """ 21 Source to read files in .ini format. 22 """ 23 24 CONFIG = INISourceConfig 25 26 async def load_fd(self, ifile): 27 # Creating an instance of configparser 28 parser = ConfigParser() 29 # Read from a file object 30 parser.read_file(ifile) 31 # Get all the sections present in the file 32 sections = parser.sections() 33 34 self.mem = {} 35 36 # Go over each section 37 for section in sections: 38 # Get data under each section as a dict 39 temp_dict = {} 40 for k, v in parser.items(section): 41 temp_dict[k] = parser_helper(v) 42 # Each section used as a record 43 self.mem[str(section)] = Record( 44 str(section), data={"features": temp_dict}, 45 ) 46 47 self.logger.debug("%r loaded %d sections", self, len(self.mem)) 48 49 async def dump_fd(self, fd): 50 # Create an instance of configparser 51 parser = ConfigParser() 52 53 # Go over each section and record in mem 54 for section, record in self.mem.items(): 55 # Get each section data as a dict 56 section_data = record.features() 57 if section not in parser.keys(): 58 # If section does not exist add new section 59 parser.add_section(section) 60 # Set section data 61 parser[section] = section_data 62 63 # Write to the fileobject 64 parser.write(fd) 65 66 self.logger.debug("%r saved %d sections", self, len(self.mem)) 67 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dffml/source/ini.py b/dffml/source/ini.py --- a/dffml/source/ini.py +++ b/dffml/source/ini.py @@ -23,11 +23,11 @@ CONFIG = INISourceConfig - async def load_fd(self, ifile): + async def load_fd(self, fileobj): # Creating an instance of configparser parser = ConfigParser() # Read from a file object - parser.read_file(ifile) + parser.read_file(fileobj) # Get all the sections present in the file sections = parser.sections() @@ -46,7 +46,7 @@ self.logger.debug("%r loaded %d sections", self, len(self.mem)) - async def dump_fd(self, fd): + async def dump_fd(self, fileobj): # Create an instance of configparser parser = ConfigParser() @@ -61,6 +61,6 @@ parser[section] = section_data # Write to the fileobject - parser.write(fd) + parser.write(fileobj) self.logger.debug("%r saved %d sections", self, len(self.mem))
{"golden_diff": "diff --git a/dffml/source/ini.py b/dffml/source/ini.py\n--- a/dffml/source/ini.py\n+++ b/dffml/source/ini.py\n@@ -23,11 +23,11 @@\n \n CONFIG = INISourceConfig\n \n- async def load_fd(self, ifile):\n+ async def load_fd(self, fileobj):\n # Creating an instance of configparser\n parser = ConfigParser()\n # Read from a file object\n- parser.read_file(ifile)\n+ parser.read_file(fileobj)\n # Get all the sections present in the file\n sections = parser.sections()\n \n@@ -46,7 +46,7 @@\n \n self.logger.debug(\"%r loaded %d sections\", self, len(self.mem))\n \n- async def dump_fd(self, fd):\n+ async def dump_fd(self, fileobj):\n # Create an instance of configparser\n parser = ConfigParser()\n \n@@ -61,6 +61,6 @@\n parser[section] = section_data\n \n # Write to the fileobject\n- parser.write(fd)\n+ parser.write(fileobj)\n \n self.logger.debug(\"%r saved %d sections\", self, len(self.mem))\n", "issue": "docs: source: New File source tutorial\nThe question of how to make a new data source came up [recently](https://gitter.im/dffml/community?at=5e0e4bfbfd580457e7b3e26a). We should have multiple tutorials for sources.\r\n\r\nWe're going to use https://github.com/intel/dffml/issues/551 as the example for this tutorial, so that issue needs to be closed first.\r\n\r\nYou'll want to make a\r\n\r\n`docs/tutorials/sources/index.rst`\r\n\r\nThen take this exist tutorial and move it under\r\n\r\n`docs/tutoirials/sources/complex.rst`\r\n\r\nYou're new one will go under \r\n\r\n`sources/file.rst`\r\n\r\nTe idea here is just to explain how you can write a source which reads data from some new filetype, with our example being the `.ini`\r\n\r\nTry to follow the new model tutorial format, where we do `literalinclude` and `:lines:` to talk about individual sections of the files we're writing.\n", "before_files": [{"content": "from configparser import ConfigParser\n\nfrom ..base import config\nfrom ..record import Record\nfrom .file import FileSource\nfrom .memory import MemorySource\nfrom ..util.data import parser_helper\nfrom ..util.entrypoint import entrypoint\n\n\n@config\nclass INISourceConfig:\n filename: str\n readwrite: bool = False\n allowempty: bool = False\n\n\n@entrypoint(\"ini\")\nclass INISource(FileSource, MemorySource):\n \"\"\"\n Source to read files in .ini format.\n \"\"\"\n\n CONFIG = INISourceConfig\n\n async def load_fd(self, ifile):\n # Creating an instance of configparser\n parser = ConfigParser()\n # Read from a file object\n parser.read_file(ifile)\n # Get all the sections present in the file\n sections = parser.sections()\n\n self.mem = {}\n\n # Go over each section\n for section in sections:\n # Get data under each section as a dict\n temp_dict = {}\n for k, v in parser.items(section):\n temp_dict[k] = parser_helper(v)\n # Each section used as a record\n self.mem[str(section)] = Record(\n str(section), data={\"features\": temp_dict},\n )\n\n self.logger.debug(\"%r loaded %d sections\", self, len(self.mem))\n\n async def dump_fd(self, fd):\n # Create an instance of configparser\n parser = ConfigParser()\n\n # Go over each section and record in mem\n for section, record in self.mem.items():\n # Get each section data as a dict\n section_data = record.features()\n if section not in parser.keys():\n # If section does not exist add new section\n parser.add_section(section)\n # Set section data\n parser[section] = section_data\n\n # Write to the fileobject\n parser.write(fd)\n\n self.logger.debug(\"%r saved %d sections\", self, len(self.mem))\n", "path": "dffml/source/ini.py"}], 
"after_files": [{"content": "from configparser import ConfigParser\n\nfrom ..base import config\nfrom ..record import Record\nfrom .file import FileSource\nfrom .memory import MemorySource\nfrom ..util.data import parser_helper\nfrom ..util.entrypoint import entrypoint\n\n\n@config\nclass INISourceConfig:\n filename: str\n readwrite: bool = False\n allowempty: bool = False\n\n\n@entrypoint(\"ini\")\nclass INISource(FileSource, MemorySource):\n \"\"\"\n Source to read files in .ini format.\n \"\"\"\n\n CONFIG = INISourceConfig\n\n async def load_fd(self, fileobj):\n # Creating an instance of configparser\n parser = ConfigParser()\n # Read from a file object\n parser.read_file(fileobj)\n # Get all the sections present in the file\n sections = parser.sections()\n\n self.mem = {}\n\n # Go over each section\n for section in sections:\n # Get data under each section as a dict\n temp_dict = {}\n for k, v in parser.items(section):\n temp_dict[k] = parser_helper(v)\n # Each section used as a record\n self.mem[str(section)] = Record(\n str(section), data={\"features\": temp_dict},\n )\n\n self.logger.debug(\"%r loaded %d sections\", self, len(self.mem))\n\n async def dump_fd(self, fileobj):\n # Create an instance of configparser\n parser = ConfigParser()\n\n # Go over each section and record in mem\n for section, record in self.mem.items():\n # Get each section data as a dict\n section_data = record.features()\n if section not in parser.keys():\n # If section does not exist add new section\n parser.add_section(section)\n # Set section data\n parser[section] = section_data\n\n # Write to the fileobject\n parser.write(fileobj)\n\n self.logger.debug(\"%r saved %d sections\", self, len(self.mem))\n", "path": "dffml/source/ini.py"}]}
1025
267
gh_patches_debug_35267
rasdani/github-patches
git_diff
pyinstaller__pyinstaller-6952
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `hook-pkg_resources.py` is including hidden imports that are not available https://github.com/pyinstaller/pyinstaller/blob/3b5d233d02472ad9d589a798d19ad3d3df645223/PyInstaller/hooks/hook-pkg_resources.py#L20 and https://github.com/pyinstaller/pyinstaller/blob/3b5d233d02472ad9d589a798d19ad3d3df645223/PyInstaller/hooks/hook-pkg_resources.py#L30 Refer to https://github.com/pypa/setuptools/issues/1963 for the past need for including `py2_warn` and [the subsequent removal of py2_warn from setuptools](https://github.com/pypa/setuptools/pull/2238). The `pkg_resources.py2_warn` submodule was introduced in v45.0.0 and removed in v49.1.1. Therefore, an check for those versions would look like this: ```python if is_module_satisfies("setuptools >= 45.0.0, < 49.1.1"): hiddenimports.append('pkg_resources.py2_warn') ``` As for `pkg_resources.markers`, I haven't found any references to any `pkg_resources.markers` packages through Google searches. I'm not convinced that it ever existed at any point. Perhaps it was being confused for (or just a typo of) `packaging.markers`, which does exist. Since `hook-pkg_resources.py` already collects the submodules of `packaging`, there doesn't need to be an extra hidden import added for `packaging.markers`. Therefore, I think that the `pkg_resources.markers` hidden import line can be removed completely. Another module that it is trying to include is `pkg_resources._vendor.pyparsing.diagrams`. This module can only be used when the `railroad` module is available. Therefore, a check should be added to make sure that the `railroad` module is available, and filter out the diagrams module when it isn't. For example: ```python from PyInstaller.utils.hooks import ( collect_submodules, is_module_satisfies, can_import_module, is_module_or_submodule ) hiddenimports = [] # pkg_resources keeps vendored modules in its _vendor subpackage and does sys.meta_path based import magic to expose # them as pkg_resources.extern.* if not can_import_module('railroad'): # The `railroad` package is an optional requirement for `pyparsing`. `pyparsing.diagrams` depends on `railroad`, so # filter it out when `railroad` is not available. hiddenimports += collect_submodules( 'pkg_resources._vendor', lambda name: is_module_or_submodule(name, 'pkg_resources._vendor.pyparsing.diagrams')) else: hiddenimports += collect_submodules('pkg_resources._vendor') ``` PyInstaller: v5.1 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `PyInstaller/hooks/hook-pkg_resources.py` Content: ``` 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2005-2022, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License (version 2 5 # or later) with exception for distributing the bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 
8 # 9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception) 10 #----------------------------------------------------------------------------- 11 12 from PyInstaller.utils.hooks import collect_submodules, is_module_satisfies 13 14 # pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose 15 # them as pkg_resources.extern.* 16 hiddenimports = collect_submodules('pkg_resources._vendor') 17 18 # pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if 19 # we would replace this by a fake module. 20 hiddenimports.append('pkg_resources.py2_warn') 21 22 excludedimports = ['__main__'] 23 24 # Some more hidden imports. See: 25 # https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be 26 # its own package, or embedded in `pkg_resources._vendor.packaging`, or both. Assume the worst and include both if 27 # present. 28 hiddenimports += collect_submodules('packaging') 29 30 hiddenimports += ['pkg_resources.markers'] 31 32 # As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco 33 # namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook 34 # utilities. 35 # 36 # In setuptools 60.7.0, the vendored jaraco.text package included "Lorem Ipsum.txt" data file, which also has to be 37 # collected. However, the presence of the data file (and the resulting directory hierarchy) confuses the importer's 38 # redirection logic; instead of trying to work-around that, tell user to upgrade or downgrade their setuptools. 39 if is_module_satisfies("setuptools == 60.7.0"): 40 raise SystemExit( 41 "ERROR: Setuptools 60.7.0 is incompatible with PyInstaller. " 42 "Downgrade to an earlier version or upgrade to a later version." 43 ) 44 # In setuptools 60.7.1, the "Lorem Ipsum.txt" data file was dropped from the vendored jaraco.text package, so we can 45 # accommodate it with couple of hidden imports. 46 elif is_module_satisfies("setuptools >= 60.7.1"): 47 hiddenimports += [ 48 'pkg_resources._vendor.jaraco.functools', 49 'pkg_resources._vendor.jaraco.context', 50 'pkg_resources._vendor.jaraco.text', 51 ] 52 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/PyInstaller/hooks/hook-pkg_resources.py b/PyInstaller/hooks/hook-pkg_resources.py --- a/PyInstaller/hooks/hook-pkg_resources.py +++ b/PyInstaller/hooks/hook-pkg_resources.py @@ -9,26 +9,32 @@ # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception) #----------------------------------------------------------------------------- -from PyInstaller.utils.hooks import collect_submodules, is_module_satisfies +from PyInstaller.utils.hooks import collect_submodules, is_module_satisfies, can_import_module # pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose # them as pkg_resources.extern.* -hiddenimports = collect_submodules('pkg_resources._vendor') + +# The `railroad` package is an optional requirement for `pyparsing`. `pyparsing.diagrams` depends on `railroad`, so +# filter it out when `railroad` is not available. +if can_import_module('railroad'): + hiddenimports = collect_submodules('pkg_resources._vendor') +else: + hiddenimports = collect_submodules( + 'pkg_resources._vendor', filter=lambda name: 'pkg_resources._vendor.pyparsing.diagram' not in name + ) # pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if # we would replace this by a fake module. -hiddenimports.append('pkg_resources.py2_warn') +if is_module_satisfies('setuptools >= 45.0.0, < 49.1.1'): + hiddenimports.append('pkg_resources.py2_warn') excludedimports = ['__main__'] # Some more hidden imports. See: # https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be -# its own package, or embedded in `pkg_resources._vendor.packaging`, or both. Assume the worst and include both if -# present. +# its own package, or embedded in `pkg_resources._vendor.packaging`, or both. hiddenimports += collect_submodules('packaging') -hiddenimports += ['pkg_resources.markers'] - # As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco # namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook # utilities.
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-pkg_resources.py b/PyInstaller/hooks/hook-pkg_resources.py\n--- a/PyInstaller/hooks/hook-pkg_resources.py\n+++ b/PyInstaller/hooks/hook-pkg_resources.py\n@@ -9,26 +9,32 @@\n # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n #-----------------------------------------------------------------------------\n \n-from PyInstaller.utils.hooks import collect_submodules, is_module_satisfies\n+from PyInstaller.utils.hooks import collect_submodules, is_module_satisfies, can_import_module\n \n # pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose\n # them as pkg_resources.extern.*\n-hiddenimports = collect_submodules('pkg_resources._vendor')\n+\n+# The `railroad` package is an optional requirement for `pyparsing`. `pyparsing.diagrams` depends on `railroad`, so\n+# filter it out when `railroad` is not available.\n+if can_import_module('railroad'):\n+ hiddenimports = collect_submodules('pkg_resources._vendor')\n+else:\n+ hiddenimports = collect_submodules(\n+ 'pkg_resources._vendor', filter=lambda name: 'pkg_resources._vendor.pyparsing.diagram' not in name\n+ )\n \n # pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if\n # we would replace this by a fake module.\n-hiddenimports.append('pkg_resources.py2_warn')\n+if is_module_satisfies('setuptools >= 45.0.0, < 49.1.1'):\n+ hiddenimports.append('pkg_resources.py2_warn')\n \n excludedimports = ['__main__']\n \n # Some more hidden imports. See:\n # https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be\n-# its own package, or embedded in `pkg_resources._vendor.packaging`, or both. Assume the worst and include both if\n-# present.\n+# its own package, or embedded in `pkg_resources._vendor.packaging`, or both.\n hiddenimports += collect_submodules('packaging')\n \n-hiddenimports += ['pkg_resources.markers']\n-\n # As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco\n # namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook\n # utilities.\n", "issue": "`hook-pkg_resources.py` is including hidden imports that are not available\nhttps://github.com/pyinstaller/pyinstaller/blob/3b5d233d02472ad9d589a798d19ad3d3df645223/PyInstaller/hooks/hook-pkg_resources.py#L20\r\n\r\nand\r\n\r\nhttps://github.com/pyinstaller/pyinstaller/blob/3b5d233d02472ad9d589a798d19ad3d3df645223/PyInstaller/hooks/hook-pkg_resources.py#L30\r\n\r\nRefer to https://github.com/pypa/setuptools/issues/1963 for the past need for including `py2_warn` and [the subsequent removal of py2_warn from setuptools](https://github.com/pypa/setuptools/pull/2238).\r\n\r\nThe `pkg_resources.py2_warn` submodule was introduced in v45.0.0 and removed in v49.1.1. Therefore, an check for those versions would look like this:\r\n```python\r\nif is_module_satisfies(\"setuptools >= 45.0.0, < 49.1.1\"):\r\n hiddenimports.append('pkg_resources.py2_warn')\r\n```\r\n\r\nAs for `pkg_resources.markers`, I haven't found any references to any `pkg_resources.markers` packages through Google searches. I'm not convinced that it ever existed at any point. Perhaps it was being confused for (or just a typo of) `packaging.markers`, which does exist. 
Since `hook-pkg_resources.py` already collects the submodules of `packaging`, there doesn't need to be an extra hidden import added for `packaging.markers`. Therefore, I think that the `pkg_resources.markers` hidden import line can be removed completely.\r\n\r\nAnother module that it is trying to include is `pkg_resources._vendor.pyparsing.diagrams`. This module can only be used when the `railroad` module is available. Therefore, a check should be added to make sure that the `railroad` module is available, and filter out the diagrams module when it isn't.\r\n\r\nFor example:\r\n```python\r\nfrom PyInstaller.utils.hooks import (\r\n collect_submodules,\r\n is_module_satisfies,\r\n can_import_module,\r\n is_module_or_submodule\r\n)\r\n\r\nhiddenimports = []\r\n\r\n# pkg_resources keeps vendored modules in its _vendor subpackage and does sys.meta_path based import magic to expose\r\n# them as pkg_resources.extern.*\r\nif not can_import_module('railroad'):\r\n # The `railroad` package is an optional requirement for `pyparsing`. `pyparsing.diagrams` depends on `railroad`, so\r\n # filter it out when `railroad` is not available.\r\n hiddenimports += collect_submodules(\r\n 'pkg_resources._vendor',\r\n lambda name: is_module_or_submodule(name, 'pkg_resources._vendor.pyparsing.diagrams'))\r\nelse:\r\n hiddenimports += collect_submodules('pkg_resources._vendor')\r\n```\r\n\r\nPyInstaller: v5.1\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2022, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.utils.hooks import collect_submodules, is_module_satisfies\n\n# pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose\n# them as pkg_resources.extern.*\nhiddenimports = collect_submodules('pkg_resources._vendor')\n\n# pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if\n# we would replace this by a fake module.\nhiddenimports.append('pkg_resources.py2_warn')\n\nexcludedimports = ['__main__']\n\n# Some more hidden imports. See:\n# https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be\n# its own package, or embedded in `pkg_resources._vendor.packaging`, or both. Assume the worst and include both if\n# present.\nhiddenimports += collect_submodules('packaging')\n\nhiddenimports += ['pkg_resources.markers']\n\n# As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco\n# namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook\n# utilities.\n#\n# In setuptools 60.7.0, the vendored jaraco.text package included \"Lorem Ipsum.txt\" data file, which also has to be\n# collected. 
However, the presence of the data file (and the resulting directory hierarchy) confuses the importer's\n# redirection logic; instead of trying to work-around that, tell user to upgrade or downgrade their setuptools.\nif is_module_satisfies(\"setuptools == 60.7.0\"):\n raise SystemExit(\n \"ERROR: Setuptools 60.7.0 is incompatible with PyInstaller. \"\n \"Downgrade to an earlier version or upgrade to a later version.\"\n )\n# In setuptools 60.7.1, the \"Lorem Ipsum.txt\" data file was dropped from the vendored jaraco.text package, so we can\n# accommodate it with couple of hidden imports.\nelif is_module_satisfies(\"setuptools >= 60.7.1\"):\n hiddenimports += [\n 'pkg_resources._vendor.jaraco.functools',\n 'pkg_resources._vendor.jaraco.context',\n 'pkg_resources._vendor.jaraco.text',\n ]\n", "path": "PyInstaller/hooks/hook-pkg_resources.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2022, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.utils.hooks import collect_submodules, is_module_satisfies, can_import_module\n\n# pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose\n# them as pkg_resources.extern.*\n\n# The `railroad` package is an optional requirement for `pyparsing`. `pyparsing.diagrams` depends on `railroad`, so\n# filter it out when `railroad` is not available.\nif can_import_module('railroad'):\n hiddenimports = collect_submodules('pkg_resources._vendor')\nelse:\n hiddenimports = collect_submodules(\n 'pkg_resources._vendor', filter=lambda name: 'pkg_resources._vendor.pyparsing.diagram' not in name\n )\n\n# pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if\n# we would replace this by a fake module.\nif is_module_satisfies('setuptools >= 45.0.0, < 49.1.1'):\n hiddenimports.append('pkg_resources.py2_warn')\n\nexcludedimports = ['__main__']\n\n# Some more hidden imports. See:\n# https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be\n# its own package, or embedded in `pkg_resources._vendor.packaging`, or both.\nhiddenimports += collect_submodules('packaging')\n\n# As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco\n# namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook\n# utilities.\n#\n# In setuptools 60.7.0, the vendored jaraco.text package included \"Lorem Ipsum.txt\" data file, which also has to be\n# collected. However, the presence of the data file (and the resulting directory hierarchy) confuses the importer's\n# redirection logic; instead of trying to work-around that, tell user to upgrade or downgrade their setuptools.\nif is_module_satisfies(\"setuptools == 60.7.0\"):\n raise SystemExit(\n \"ERROR: Setuptools 60.7.0 is incompatible with PyInstaller. 
\"\n \"Downgrade to an earlier version or upgrade to a later version.\"\n )\n# In setuptools 60.7.1, the \"Lorem Ipsum.txt\" data file was dropped from the vendored jaraco.text package, so we can\n# accommodate it with couple of hidden imports.\nelif is_module_satisfies(\"setuptools >= 60.7.1\"):\n hiddenimports += [\n 'pkg_resources._vendor.jaraco.functools',\n 'pkg_resources._vendor.jaraco.context',\n 'pkg_resources._vendor.jaraco.text',\n ]\n", "path": "PyInstaller/hooks/hook-pkg_resources.py"}]}
1624
551
gh_patches_debug_2753
rasdani/github-patches
git_diff
SeldonIO__MLServer-1172
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Star imports from `mlserver.codecs` not working For example: ```python from mlserver.codecs import * ``` Throws an error: ```python Traceback (most recent call last): File "/home/janis/.conda/envs/py310/lib/python3.10/site-packages/IPython/core/interactiveshell.py", line 3460, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-2-b8cc62508f29>", line 1, in <module> from mlserver.codecs import * AttributeError: module 'mlserver.codecs' has no attribute 'StringRequestCodec' ``` This is likely because `__all__` is out-of-date with the actual imports. I haven't tested other sub-packages, but it might be worth looking at these. P.S. I'm not a big fan of `__all__` and star imports in particular, the main issue is that the existence of `__all__` gives rise to two public APIs which may diverge (as it has in this case). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mlserver/codecs/__init__.py` Content: ``` 1 from .numpy import NumpyCodec, NumpyRequestCodec 2 from .pandas import PandasCodec 3 from .string import StringCodec 4 from .base64 import Base64Codec 5 from .datetime import DatetimeCodec 6 from .errors import CodecError 7 from .decorator import decode_args 8 from .base import ( 9 InputCodec, 10 RequestCodec, 11 register_input_codec, 12 register_request_codec, 13 InputCodecLike, 14 RequestCodecLike, 15 ) 16 from .utils import ( 17 DecodedParameterName, 18 has_decoded, 19 get_decoded, 20 get_decoded_or_raw, 21 encode_inference_response, 22 encode_response_output, 23 decode_request_input, 24 decode_inference_request, 25 ) 26 27 __all__ = [ 28 "CodecError", 29 "NumpyCodec", 30 "NumpyRequestCodec", 31 "StringCodec", 32 "StringRequestCodec", 33 "Base64Codec", 34 "DatetimeCodec", 35 "PandasCodec", 36 "InputCodec", 37 "InputCodecLike", 38 "RequestCodec", 39 "RequestCodecLike", 40 "DecodedParameterName", 41 "register_input_codec", 42 "register_request_codec", 43 "has_decoded", 44 "get_decoded", 45 "get_decoded_or_raw", 46 "encode_inference_response", 47 "encode_response_output", 48 "decode_request_input", 49 "decode_inference_request", 50 "decode_args", 51 ] 52 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mlserver/codecs/__init__.py b/mlserver/codecs/__init__.py --- a/mlserver/codecs/__init__.py +++ b/mlserver/codecs/__init__.py @@ -1,6 +1,6 @@ from .numpy import NumpyCodec, NumpyRequestCodec from .pandas import PandasCodec -from .string import StringCodec +from .string import StringCodec, StringRequestCodec from .base64 import Base64Codec from .datetime import DatetimeCodec from .errors import CodecError
{"golden_diff": "diff --git a/mlserver/codecs/__init__.py b/mlserver/codecs/__init__.py\n--- a/mlserver/codecs/__init__.py\n+++ b/mlserver/codecs/__init__.py\n@@ -1,6 +1,6 @@\n from .numpy import NumpyCodec, NumpyRequestCodec\n from .pandas import PandasCodec\n-from .string import StringCodec\n+from .string import StringCodec, StringRequestCodec\n from .base64 import Base64Codec\n from .datetime import DatetimeCodec\n from .errors import CodecError\n", "issue": "Star imports from `mlserver.codecs` not working\nFor example:\r\n\r\n```python\r\nfrom mlserver.codecs import *\r\n```\r\nThrows an error:\r\n```python\r\nTraceback (most recent call last):\r\n File \"/home/janis/.conda/envs/py310/lib/python3.10/site-packages/IPython/core/interactiveshell.py\", line 3460, in run_code\r\n exec(code_obj, self.user_global_ns, self.user_ns)\r\n File \"<ipython-input-2-b8cc62508f29>\", line 1, in <module>\r\n from mlserver.codecs import *\r\nAttributeError: module 'mlserver.codecs' has no attribute 'StringRequestCodec'\r\n```\r\n\r\nThis is likely because `__all__` is out-of-date with the actual imports. I haven't tested other sub-packages, but it might be worth looking at these.\r\n\r\nP.S. I'm not a big fan of `__all__` and star imports in particular, the main issue is that the existence of `__all__` gives rise to two public APIs which may diverge (as it has in this case).\n", "before_files": [{"content": "from .numpy import NumpyCodec, NumpyRequestCodec\nfrom .pandas import PandasCodec\nfrom .string import StringCodec\nfrom .base64 import Base64Codec\nfrom .datetime import DatetimeCodec\nfrom .errors import CodecError\nfrom .decorator import decode_args\nfrom .base import (\n InputCodec,\n RequestCodec,\n register_input_codec,\n register_request_codec,\n InputCodecLike,\n RequestCodecLike,\n)\nfrom .utils import (\n DecodedParameterName,\n has_decoded,\n get_decoded,\n get_decoded_or_raw,\n encode_inference_response,\n encode_response_output,\n decode_request_input,\n decode_inference_request,\n)\n\n__all__ = [\n \"CodecError\",\n \"NumpyCodec\",\n \"NumpyRequestCodec\",\n \"StringCodec\",\n \"StringRequestCodec\",\n \"Base64Codec\",\n \"DatetimeCodec\",\n \"PandasCodec\",\n \"InputCodec\",\n \"InputCodecLike\",\n \"RequestCodec\",\n \"RequestCodecLike\",\n \"DecodedParameterName\",\n \"register_input_codec\",\n \"register_request_codec\",\n \"has_decoded\",\n \"get_decoded\",\n \"get_decoded_or_raw\",\n \"encode_inference_response\",\n \"encode_response_output\",\n \"decode_request_input\",\n \"decode_inference_request\",\n \"decode_args\",\n]\n", "path": "mlserver/codecs/__init__.py"}], "after_files": [{"content": "from .numpy import NumpyCodec, NumpyRequestCodec\nfrom .pandas import PandasCodec\nfrom .string import StringCodec, StringRequestCodec\nfrom .base64 import Base64Codec\nfrom .datetime import DatetimeCodec\nfrom .errors import CodecError\nfrom .decorator import decode_args\nfrom .base import (\n InputCodec,\n RequestCodec,\n register_input_codec,\n register_request_codec,\n InputCodecLike,\n RequestCodecLike,\n)\nfrom .utils import (\n DecodedParameterName,\n has_decoded,\n get_decoded,\n get_decoded_or_raw,\n encode_inference_response,\n encode_response_output,\n decode_request_input,\n decode_inference_request,\n)\n\n__all__ = [\n \"CodecError\",\n \"NumpyCodec\",\n \"NumpyRequestCodec\",\n \"StringCodec\",\n \"StringRequestCodec\",\n \"Base64Codec\",\n \"DatetimeCodec\",\n \"PandasCodec\",\n \"InputCodec\",\n \"InputCodecLike\",\n \"RequestCodec\",\n \"RequestCodecLike\",\n 
\"DecodedParameterName\",\n \"register_input_codec\",\n \"register_request_codec\",\n \"has_decoded\",\n \"get_decoded\",\n \"get_decoded_or_raw\",\n \"encode_inference_response\",\n \"encode_response_output\",\n \"decode_request_input\",\n \"decode_inference_request\",\n \"decode_args\",\n]\n", "path": "mlserver/codecs/__init__.py"}]}
895
121
gh_patches_debug_19124
rasdani/github-patches
git_diff
TheAlgorithms__Python-664
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- errors in balanced_parentheses.py https://github.com/TheAlgorithms/Python/blob/6a95bf9ad8090a1280abf76752a52ab9ee2420b7/data_structures/stacks/balanced_parentheses.py#L13-L17 `Line 16` returns wrong result due to `not` `Line 15` can cause error for inputs like **))**. before calling _pop_ it should be checked if stack is empty. https://github.com/TheAlgorithms/Python/blob/6a95bf9ad8090a1280abf76752a52ab9ee2420b7/data_structures/stacks/balanced_parentheses.py#L3 there is no module named **.Stack**. it should be _from **s**tack import Stack_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `data_structures/stacks/balanced_parentheses.py` Content: ``` 1 from __future__ import print_function 2 from __future__ import absolute_import 3 from .Stack import Stack 4 5 __author__ = 'Omkar Pathak' 6 7 8 def balanced_parentheses(parentheses): 9 """ Use a stack to check if a string of parentheses is balanced.""" 10 stack = Stack(len(parentheses)) 11 for parenthesis in parentheses: 12 if parenthesis == '(': 13 stack.push(parenthesis) 14 elif parenthesis == ')': 15 stack.pop() 16 return not stack.is_empty() 17 18 19 if __name__ == '__main__': 20 examples = ['((()))', '((())'] 21 print('Balanced parentheses demonstration:\n') 22 for example in examples: 23 print(example + ': ' + str(balanced_parentheses(example))) 24 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/data_structures/stacks/balanced_parentheses.py b/data_structures/stacks/balanced_parentheses.py --- a/data_structures/stacks/balanced_parentheses.py +++ b/data_structures/stacks/balanced_parentheses.py @@ -1,6 +1,6 @@ from __future__ import print_function from __future__ import absolute_import -from .Stack import Stack +from stack import Stack __author__ = 'Omkar Pathak' @@ -12,12 +12,14 @@ if parenthesis == '(': stack.push(parenthesis) elif parenthesis == ')': + if stack.is_empty(): + return False stack.pop() - return not stack.is_empty() + return stack.is_empty() if __name__ == '__main__': - examples = ['((()))', '((())'] + examples = ['((()))', '((())', '(()))'] print('Balanced parentheses demonstration:\n') for example in examples: print(example + ': ' + str(balanced_parentheses(example)))
{"golden_diff": "diff --git a/data_structures/stacks/balanced_parentheses.py b/data_structures/stacks/balanced_parentheses.py\n--- a/data_structures/stacks/balanced_parentheses.py\n+++ b/data_structures/stacks/balanced_parentheses.py\n@@ -1,6 +1,6 @@\n from __future__ import print_function\n from __future__ import absolute_import\n-from .Stack import Stack\n+from stack import Stack\n \n __author__ = 'Omkar Pathak'\n \n@@ -12,12 +12,14 @@\n if parenthesis == '(':\n stack.push(parenthesis)\n elif parenthesis == ')':\n+ if stack.is_empty():\n+ return False\n stack.pop()\n- return not stack.is_empty()\n+ return stack.is_empty()\n \n \n if __name__ == '__main__':\n- examples = ['((()))', '((())']\n+ examples = ['((()))', '((())', '(()))']\n print('Balanced parentheses demonstration:\\n')\n for example in examples:\n print(example + ': ' + str(balanced_parentheses(example)))\n", "issue": "errors in balanced_parentheses.py\nhttps://github.com/TheAlgorithms/Python/blob/6a95bf9ad8090a1280abf76752a52ab9ee2420b7/data_structures/stacks/balanced_parentheses.py#L13-L17\r\n\r\n`Line 16` returns wrong result due to `not`\r\n\r\n`Line 15` can cause error for inputs like **))**. before calling _pop_ it should be checked if stack is empty.\r\n\r\nhttps://github.com/TheAlgorithms/Python/blob/6a95bf9ad8090a1280abf76752a52ab9ee2420b7/data_structures/stacks/balanced_parentheses.py#L3\r\n\r\nthere is no module named **.Stack**. it should be\r\n _from **s**tack import Stack_\n", "before_files": [{"content": "from __future__ import print_function\nfrom __future__ import absolute_import\nfrom .Stack import Stack\n\n__author__ = 'Omkar Pathak'\n\n\ndef balanced_parentheses(parentheses):\n \"\"\" Use a stack to check if a string of parentheses is balanced.\"\"\"\n stack = Stack(len(parentheses))\n for parenthesis in parentheses:\n if parenthesis == '(':\n stack.push(parenthesis)\n elif parenthesis == ')':\n stack.pop()\n return not stack.is_empty()\n\n\nif __name__ == '__main__':\n examples = ['((()))', '((())']\n print('Balanced parentheses demonstration:\\n')\n for example in examples:\n print(example + ': ' + str(balanced_parentheses(example)))\n", "path": "data_structures/stacks/balanced_parentheses.py"}], "after_files": [{"content": "from __future__ import print_function\nfrom __future__ import absolute_import\nfrom stack import Stack\n\n__author__ = 'Omkar Pathak'\n\n\ndef balanced_parentheses(parentheses):\n \"\"\" Use a stack to check if a string of parentheses is balanced.\"\"\"\n stack = Stack(len(parentheses))\n for parenthesis in parentheses:\n if parenthesis == '(':\n stack.push(parenthesis)\n elif parenthesis == ')':\n if stack.is_empty():\n return False\n stack.pop()\n return stack.is_empty()\n\n\nif __name__ == '__main__':\n examples = ['((()))', '((())', '(()))']\n print('Balanced parentheses demonstration:\\n')\n for example in examples:\n print(example + ': ' + str(balanced_parentheses(example)))\n", "path": "data_structures/stacks/balanced_parentheses.py"}]}
667
244
gh_patches_debug_26805
rasdani/github-patches
git_diff
streamlink__streamlink-1268
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Kanal7 Plugin defective! Hi there, can you have a look on the kanal7.py please? "error: No playable streams found on this URL" Greetings --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/streamlink/plugins/kanal7.py` Content: ``` 1 from __future__ import print_function 2 import re 3 4 from streamlink.plugin import Plugin 5 from streamlink.plugin.api import http 6 from streamlink.plugin.api import validate 7 from streamlink.stream import HLSStream 8 9 10 class Kanal7(Plugin): 11 url_re = re.compile(r"https?://(?:www.)?kanal7.com/canli-izle") 12 iframe_re = re.compile(r'iframe .*?src="(http://[^"]*?)"') 13 stream_re = re.compile(r'src: "(http[^"]*?)"') 14 15 @classmethod 16 def can_handle_url(cls, url): 17 return cls.url_re.match(url) is not None 18 19 def find_iframe(self, url): 20 res = http.get(url) 21 # find iframe url 22 iframe = self.iframe_re.search(res.text) 23 iframe_url = iframe and iframe.group(1) 24 if iframe_url: 25 self.logger.debug("Found iframe: {}", iframe_url) 26 return iframe_url 27 28 def _get_streams(self): 29 iframe1 = self.find_iframe(self.url) 30 if iframe1: 31 iframe2 = self.find_iframe(iframe1) 32 if iframe2: 33 ires = http.get(iframe2) 34 stream_m = self.stream_re.search(ires.text) 35 stream_url = stream_m and stream_m.group(1) 36 if stream_url: 37 yield "live", HLSStream(self.session, stream_url) 38 else: 39 self.logger.error("Could not find second iframe, has the page layout changed?") 40 else: 41 self.logger.error("Could not find iframe, has the page layout changed?") 42 43 44 __plugin__ = Kanal7 45 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/streamlink/plugins/kanal7.py b/src/streamlink/plugins/kanal7.py --- a/src/streamlink/plugins/kanal7.py +++ b/src/streamlink/plugins/kanal7.py @@ -3,6 +3,7 @@ from streamlink.plugin import Plugin from streamlink.plugin.api import http +from streamlink.plugin.api import useragents from streamlink.plugin.api import validate from streamlink.stream import HLSStream @@ -10,7 +11,7 @@ class Kanal7(Plugin): url_re = re.compile(r"https?://(?:www.)?kanal7.com/canli-izle") iframe_re = re.compile(r'iframe .*?src="(http://[^"]*?)"') - stream_re = re.compile(r'src: "(http[^"]*?)"') + stream_re = re.compile(r'src="(http[^"]*?)"') @classmethod def can_handle_url(cls, url): @@ -34,7 +35,7 @@ stream_m = self.stream_re.search(ires.text) stream_url = stream_m and stream_m.group(1) if stream_url: - yield "live", HLSStream(self.session, stream_url) + yield "live", HLSStream(self.session, stream_url, headers={"Referer": iframe2}) else: self.logger.error("Could not find second iframe, has the page layout changed?") else:
{"golden_diff": "diff --git a/src/streamlink/plugins/kanal7.py b/src/streamlink/plugins/kanal7.py\n--- a/src/streamlink/plugins/kanal7.py\n+++ b/src/streamlink/plugins/kanal7.py\n@@ -3,6 +3,7 @@\n \n from streamlink.plugin import Plugin\n from streamlink.plugin.api import http\n+from streamlink.plugin.api import useragents\n from streamlink.plugin.api import validate\n from streamlink.stream import HLSStream\n \n@@ -10,7 +11,7 @@\n class Kanal7(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?kanal7.com/canli-izle\")\n iframe_re = re.compile(r'iframe .*?src=\"(http://[^\"]*?)\"')\n- stream_re = re.compile(r'src: \"(http[^\"]*?)\"')\n+ stream_re = re.compile(r'src=\"(http[^\"]*?)\"')\n \n @classmethod\n def can_handle_url(cls, url):\n@@ -34,7 +35,7 @@\n stream_m = self.stream_re.search(ires.text)\n stream_url = stream_m and stream_m.group(1)\n if stream_url:\n- yield \"live\", HLSStream(self.session, stream_url)\n+ yield \"live\", HLSStream(self.session, stream_url, headers={\"Referer\": iframe2})\n else:\n self.logger.error(\"Could not find second iframe, has the page layout changed?\")\n else:\n", "issue": "Kanal7 Plugin defective!\nHi there,\r\n\r\ncan you have a look on the kanal7.py please?\r\n\r\n\"error: No playable streams found on this URL\"\r\n\r\nGreetings\n", "before_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass Kanal7(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?kanal7.com/canli-izle\")\n iframe_re = re.compile(r'iframe .*?src=\"(http://[^\"]*?)\"')\n stream_re = re.compile(r'src: \"(http[^\"]*?)\"')\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def find_iframe(self, url):\n res = http.get(url)\n # find iframe url\n iframe = self.iframe_re.search(res.text)\n iframe_url = iframe and iframe.group(1)\n if iframe_url:\n self.logger.debug(\"Found iframe: {}\", iframe_url)\n return iframe_url\n\n def _get_streams(self):\n iframe1 = self.find_iframe(self.url)\n if iframe1:\n iframe2 = self.find_iframe(iframe1)\n if iframe2:\n ires = http.get(iframe2)\n stream_m = self.stream_re.search(ires.text)\n stream_url = stream_m and stream_m.group(1)\n if stream_url:\n yield \"live\", HLSStream(self.session, stream_url)\n else:\n self.logger.error(\"Could not find second iframe, has the page layout changed?\")\n else:\n self.logger.error(\"Could not find iframe, has the page layout changed?\")\n\n\n__plugin__ = Kanal7\n", "path": "src/streamlink/plugins/kanal7.py"}], "after_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import useragents\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass Kanal7(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?kanal7.com/canli-izle\")\n iframe_re = re.compile(r'iframe .*?src=\"(http://[^\"]*?)\"')\n stream_re = re.compile(r'src=\"(http[^\"]*?)\"')\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def find_iframe(self, url):\n res = http.get(url)\n # find iframe url\n iframe = self.iframe_re.search(res.text)\n iframe_url = iframe and iframe.group(1)\n if iframe_url:\n self.logger.debug(\"Found iframe: {}\", iframe_url)\n return iframe_url\n\n def _get_streams(self):\n iframe1 = 
self.find_iframe(self.url)\n if iframe1:\n iframe2 = self.find_iframe(iframe1)\n if iframe2:\n ires = http.get(iframe2)\n stream_m = self.stream_re.search(ires.text)\n stream_url = stream_m and stream_m.group(1)\n if stream_url:\n yield \"live\", HLSStream(self.session, stream_url, headers={\"Referer\": iframe2})\n else:\n self.logger.error(\"Could not find second iframe, has the page layout changed?\")\n else:\n self.logger.error(\"Could not find iframe, has the page layout changed?\")\n\n\n__plugin__ = Kanal7\n", "path": "src/streamlink/plugins/kanal7.py"}]}
732
317
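Taken on its own, the kanal7 record reduces to two changes: the stream pattern now matches an HTML `src="http…"` attribute instead of a JavaScript `src: "http…"` literal, and the HLS request carries the embed page's URL as a `Referer` header. A minimal sketch of that extraction step, assuming a made-up embed snippet (the `example.com` markup below is illustrative, not taken from the site):

```python
import re

# Pattern from the golden diff: the embed page inlines the HLS URL as an
# HTML attribute rather than a JS object literal.
STREAM_RE = re.compile(r'src="(http[^"]*?)"')

def extract_stream(iframe_html, iframe_url):
    """Return (stream_url, request_headers) or None when nothing matches."""
    match = STREAM_RE.search(iframe_html)
    if not match:
        return None
    # The diff also forwards the second iframe's URL as a Referer,
    # presumably because the stream host rejects bare requests.
    return match.group(1), {"Referer": iframe_url}

html = '<video src="http://example.com/live/playlist.m3u8"></video>'
print(extract_stream(html, "http://example.com/embed"))
```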
gh_patches_debug_23275
rasdani/github-patches
git_diff
wagtail__wagtail-3304
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- draft and live action labels are unclear ![screen shot 2017-01-05 at 12 42 52](https://cloud.githubusercontent.com/assets/7290588/21680947/944efe00-d344-11e6-9cbd-870f91fe9d5f.png) I don't think it's clear what the 'draft' and 'live' buttons will do. Could we include the full labels 'Preview draft' and 'View live'? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wagtail/wagtailadmin/wagtail_hooks.py` Content: ``` 1 from __future__ import absolute_import, unicode_literals 2 3 from django import forms 4 from django.contrib.auth.models import Permission 5 from django.contrib.staticfiles.templatetags.staticfiles import static 6 from django.core.urlresolvers import reverse 7 from django.utils.translation import ugettext_lazy as _ 8 from wagtail.wagtailadmin.menu import MenuItem, SubmenuMenuItem, settings_menu 9 from wagtail.wagtailadmin.search import SearchArea 10 from wagtail.wagtailadmin.widgets import Button, ButtonWithDropdownFromHook, PageListingButton 11 from wagtail.wagtailcore import hooks 12 from wagtail.wagtailcore.permissions import collection_permission_policy 13 14 15 class ExplorerMenuItem(MenuItem): 16 @property 17 def media(self): 18 return forms.Media(js=[static('wagtailadmin/js/explorer-menu.js')]) 19 20 21 @hooks.register('register_admin_menu_item') 22 def register_explorer_menu_item(): 23 return ExplorerMenuItem( 24 _('Explorer'), reverse('wagtailadmin_explore_root'), 25 name='explorer', 26 classnames='icon icon-folder-open-inverse dl-trigger', 27 attrs={'data-explorer-menu-url': reverse('wagtailadmin_explorer_nav')}, 28 order=100) 29 30 31 class SettingsMenuItem(SubmenuMenuItem): 32 template = 'wagtailadmin/shared/menu_settings_menu_item.html' 33 34 35 @hooks.register('register_admin_menu_item') 36 def register_settings_menu(): 37 return SettingsMenuItem( 38 _('Settings'), settings_menu, classnames='icon icon-cogs', order=10000) 39 40 41 @hooks.register('register_permissions') 42 def register_permissions(): 43 return Permission.objects.filter(content_type__app_label='wagtailadmin', codename='access_admin') 44 45 46 @hooks.register('register_admin_search_area') 47 def register_pages_search_area(): 48 return SearchArea( 49 _('Pages'), reverse('wagtailadmin_pages:search'), 50 name='pages', 51 classnames='icon icon-folder-open-inverse', 52 order=100) 53 54 55 class CollectionsMenuItem(MenuItem): 56 def is_shown(self, request): 57 return collection_permission_policy.user_has_any_permission( 58 request.user, ['add', 'change', 'delete'] 59 ) 60 61 62 @hooks.register('register_settings_menu_item') 63 def register_collections_menu_item(): 64 return CollectionsMenuItem(_('Collections'), reverse('wagtailadmin_collections:index'), classnames='icon icon-folder-open-1', order=700) 65 66 67 @hooks.register('register_page_listing_buttons') 68 def page_listing_buttons(page, page_perms, is_parent=False): 69 if page_perms.can_edit(): 70 yield PageListingButton(_('Edit'), reverse('wagtailadmin_pages:edit', args=[page.id]), 71 attrs={'title': _('Edit this page')}, priority=10) 72 if page.has_unpublished_changes: 73 yield PageListingButton(_('Draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]), 74 attrs={'title': _('Preview draft'), 'target': '_blank'}, priority=20) 75 if page.live and page.url: 76 yield PageListingButton(_('Live'), page.url, attrs={'target': "_blank", 
'title': _('View live')}, priority=30) 77 if page_perms.can_add_subpage(): 78 if is_parent: 79 yield Button(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]), 80 attrs={'title': _("Add a child page to '{0}' ").format(page.get_admin_display_title())}, classes={'button', 'button-small', 'bicolor', 'icon', 'white', 'icon-plus'}, priority=40) 81 else: 82 yield PageListingButton(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]), 83 attrs={'title': _("Add a child page to '{0}' ").format(page.get_admin_display_title())}, priority=40) 84 85 yield ButtonWithDropdownFromHook( 86 _('More'), 87 hook_name='register_page_listing_more_buttons', 88 page=page, 89 page_perms=page_perms, 90 is_parent=is_parent, 91 attrs={'target': '_blank', 'title': _('View more options')}, priority=50) 92 93 94 @hooks.register('register_page_listing_more_buttons') 95 def page_listing_more_buttons(page, page_perms, is_parent=False): 96 if page_perms.can_move(): 97 yield Button(_('Move'), reverse('wagtailadmin_pages:move', args=[page.id]), 98 attrs={"title": _('Move this page')}, priority=10) 99 if not page.is_root(): 100 yield Button(_('Copy'), reverse('wagtailadmin_pages:copy', args=[page.id]), 101 attrs={'title': _('Copy this page')}, priority=20) 102 if page_perms.can_delete(): 103 yield Button(_('Delete'), reverse('wagtailadmin_pages:delete', args=[page.id]), 104 attrs={'title': _('Delete this page')}, priority=30) 105 if page_perms.can_unpublish(): 106 yield Button(_('Unpublish'), reverse('wagtailadmin_pages:unpublish', args=[page.id]), 107 attrs={'title': _('Unpublish this page')}, priority=40) 108 if not page.is_root(): 109 yield Button(_('Revisions'), reverse('wagtailadmin_pages:revisions_index', args=[page.id]), 110 attrs={'title': _("View this page's revision history")}, priority=50) 111 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wagtail/wagtailadmin/wagtail_hooks.py b/wagtail/wagtailadmin/wagtail_hooks.py --- a/wagtail/wagtailadmin/wagtail_hooks.py +++ b/wagtail/wagtailadmin/wagtail_hooks.py @@ -70,10 +70,10 @@ yield PageListingButton(_('Edit'), reverse('wagtailadmin_pages:edit', args=[page.id]), attrs={'title': _('Edit this page')}, priority=10) if page.has_unpublished_changes: - yield PageListingButton(_('Draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]), + yield PageListingButton(_('View draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]), attrs={'title': _('Preview draft'), 'target': '_blank'}, priority=20) if page.live and page.url: - yield PageListingButton(_('Live'), page.url, attrs={'target': "_blank", 'title': _('View live')}, priority=30) + yield PageListingButton(_('View live'), page.url, attrs={'target': "_blank", 'title': _('View live')}, priority=30) if page_perms.can_add_subpage(): if is_parent: yield Button(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]),
{"golden_diff": "diff --git a/wagtail/wagtailadmin/wagtail_hooks.py b/wagtail/wagtailadmin/wagtail_hooks.py\n--- a/wagtail/wagtailadmin/wagtail_hooks.py\n+++ b/wagtail/wagtailadmin/wagtail_hooks.py\n@@ -70,10 +70,10 @@\n yield PageListingButton(_('Edit'), reverse('wagtailadmin_pages:edit', args=[page.id]),\n attrs={'title': _('Edit this page')}, priority=10)\n if page.has_unpublished_changes:\n- yield PageListingButton(_('Draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]),\n+ yield PageListingButton(_('View draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]),\n attrs={'title': _('Preview draft'), 'target': '_blank'}, priority=20)\n if page.live and page.url:\n- yield PageListingButton(_('Live'), page.url, attrs={'target': \"_blank\", 'title': _('View live')}, priority=30)\n+ yield PageListingButton(_('View live'), page.url, attrs={'target': \"_blank\", 'title': _('View live')}, priority=30)\n if page_perms.can_add_subpage():\n if is_parent:\n yield Button(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]),\n", "issue": "draft and live action labels are unclear \n![screen shot 2017-01-05 at 12 42 52](https://cloud.githubusercontent.com/assets/7290588/21680947/944efe00-d344-11e6-9cbd-870f91fe9d5f.png)\r\n\r\nI don't think it's clear what the 'draft' and 'live' buttons will do. Could we include the full labels 'Preview draft' and 'View live'? \n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django import forms\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.staticfiles.templatetags.staticfiles import static\nfrom django.core.urlresolvers import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom wagtail.wagtailadmin.menu import MenuItem, SubmenuMenuItem, settings_menu\nfrom wagtail.wagtailadmin.search import SearchArea\nfrom wagtail.wagtailadmin.widgets import Button, ButtonWithDropdownFromHook, PageListingButton\nfrom wagtail.wagtailcore import hooks\nfrom wagtail.wagtailcore.permissions import collection_permission_policy\n\n\nclass ExplorerMenuItem(MenuItem):\n @property\n def media(self):\n return forms.Media(js=[static('wagtailadmin/js/explorer-menu.js')])\n\n\n@hooks.register('register_admin_menu_item')\ndef register_explorer_menu_item():\n return ExplorerMenuItem(\n _('Explorer'), reverse('wagtailadmin_explore_root'),\n name='explorer',\n classnames='icon icon-folder-open-inverse dl-trigger',\n attrs={'data-explorer-menu-url': reverse('wagtailadmin_explorer_nav')},\n order=100)\n\n\nclass SettingsMenuItem(SubmenuMenuItem):\n template = 'wagtailadmin/shared/menu_settings_menu_item.html'\n\n\n@hooks.register('register_admin_menu_item')\ndef register_settings_menu():\n return SettingsMenuItem(\n _('Settings'), settings_menu, classnames='icon icon-cogs', order=10000)\n\n\n@hooks.register('register_permissions')\ndef register_permissions():\n return Permission.objects.filter(content_type__app_label='wagtailadmin', codename='access_admin')\n\n\n@hooks.register('register_admin_search_area')\ndef register_pages_search_area():\n return SearchArea(\n _('Pages'), reverse('wagtailadmin_pages:search'),\n name='pages',\n classnames='icon icon-folder-open-inverse',\n order=100)\n\n\nclass CollectionsMenuItem(MenuItem):\n def is_shown(self, request):\n return collection_permission_policy.user_has_any_permission(\n request.user, ['add', 'change', 'delete']\n )\n\n\n@hooks.register('register_settings_menu_item')\ndef register_collections_menu_item():\n 
return CollectionsMenuItem(_('Collections'), reverse('wagtailadmin_collections:index'), classnames='icon icon-folder-open-1', order=700)\n\n\n@hooks.register('register_page_listing_buttons')\ndef page_listing_buttons(page, page_perms, is_parent=False):\n if page_perms.can_edit():\n yield PageListingButton(_('Edit'), reverse('wagtailadmin_pages:edit', args=[page.id]),\n attrs={'title': _('Edit this page')}, priority=10)\n if page.has_unpublished_changes:\n yield PageListingButton(_('Draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]),\n attrs={'title': _('Preview draft'), 'target': '_blank'}, priority=20)\n if page.live and page.url:\n yield PageListingButton(_('Live'), page.url, attrs={'target': \"_blank\", 'title': _('View live')}, priority=30)\n if page_perms.can_add_subpage():\n if is_parent:\n yield Button(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]),\n attrs={'title': _(\"Add a child page to '{0}' \").format(page.get_admin_display_title())}, classes={'button', 'button-small', 'bicolor', 'icon', 'white', 'icon-plus'}, priority=40)\n else:\n yield PageListingButton(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]),\n attrs={'title': _(\"Add a child page to '{0}' \").format(page.get_admin_display_title())}, priority=40)\n\n yield ButtonWithDropdownFromHook(\n _('More'),\n hook_name='register_page_listing_more_buttons',\n page=page,\n page_perms=page_perms,\n is_parent=is_parent,\n attrs={'target': '_blank', 'title': _('View more options')}, priority=50)\n\n\n@hooks.register('register_page_listing_more_buttons')\ndef page_listing_more_buttons(page, page_perms, is_parent=False):\n if page_perms.can_move():\n yield Button(_('Move'), reverse('wagtailadmin_pages:move', args=[page.id]),\n attrs={\"title\": _('Move this page')}, priority=10)\n if not page.is_root():\n yield Button(_('Copy'), reverse('wagtailadmin_pages:copy', args=[page.id]),\n attrs={'title': _('Copy this page')}, priority=20)\n if page_perms.can_delete():\n yield Button(_('Delete'), reverse('wagtailadmin_pages:delete', args=[page.id]),\n attrs={'title': _('Delete this page')}, priority=30)\n if page_perms.can_unpublish():\n yield Button(_('Unpublish'), reverse('wagtailadmin_pages:unpublish', args=[page.id]),\n attrs={'title': _('Unpublish this page')}, priority=40)\n if not page.is_root():\n yield Button(_('Revisions'), reverse('wagtailadmin_pages:revisions_index', args=[page.id]),\n attrs={'title': _(\"View this page's revision history\")}, priority=50)\n", "path": "wagtail/wagtailadmin/wagtail_hooks.py"}], "after_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django import forms\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.staticfiles.templatetags.staticfiles import static\nfrom django.core.urlresolvers import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom wagtail.wagtailadmin.menu import MenuItem, SubmenuMenuItem, settings_menu\nfrom wagtail.wagtailadmin.search import SearchArea\nfrom wagtail.wagtailadmin.widgets import Button, ButtonWithDropdownFromHook, PageListingButton\nfrom wagtail.wagtailcore import hooks\nfrom wagtail.wagtailcore.permissions import collection_permission_policy\n\n\nclass ExplorerMenuItem(MenuItem):\n @property\n def media(self):\n return forms.Media(js=[static('wagtailadmin/js/explorer-menu.js')])\n\n\n@hooks.register('register_admin_menu_item')\ndef register_explorer_menu_item():\n return ExplorerMenuItem(\n _('Explorer'), 
reverse('wagtailadmin_explore_root'),\n name='explorer',\n classnames='icon icon-folder-open-inverse dl-trigger',\n attrs={'data-explorer-menu-url': reverse('wagtailadmin_explorer_nav')},\n order=100)\n\n\nclass SettingsMenuItem(SubmenuMenuItem):\n template = 'wagtailadmin/shared/menu_settings_menu_item.html'\n\n\n@hooks.register('register_admin_menu_item')\ndef register_settings_menu():\n return SettingsMenuItem(\n _('Settings'), settings_menu, classnames='icon icon-cogs', order=10000)\n\n\n@hooks.register('register_permissions')\ndef register_permissions():\n return Permission.objects.filter(content_type__app_label='wagtailadmin', codename='access_admin')\n\n\n@hooks.register('register_admin_search_area')\ndef register_pages_search_area():\n return SearchArea(\n _('Pages'), reverse('wagtailadmin_pages:search'),\n name='pages',\n classnames='icon icon-folder-open-inverse',\n order=100)\n\n\nclass CollectionsMenuItem(MenuItem):\n def is_shown(self, request):\n return collection_permission_policy.user_has_any_permission(\n request.user, ['add', 'change', 'delete']\n )\n\n\n@hooks.register('register_settings_menu_item')\ndef register_collections_menu_item():\n return CollectionsMenuItem(_('Collections'), reverse('wagtailadmin_collections:index'), classnames='icon icon-folder-open-1', order=700)\n\n\n@hooks.register('register_page_listing_buttons')\ndef page_listing_buttons(page, page_perms, is_parent=False):\n if page_perms.can_edit():\n yield PageListingButton(_('Edit'), reverse('wagtailadmin_pages:edit', args=[page.id]),\n attrs={'title': _('Edit this page')}, priority=10)\n if page.has_unpublished_changes:\n yield PageListingButton(_('View draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]),\n attrs={'title': _('Preview draft'), 'target': '_blank'}, priority=20)\n if page.live and page.url:\n yield PageListingButton(_('View live'), page.url, attrs={'target': \"_blank\", 'title': _('View live')}, priority=30)\n if page_perms.can_add_subpage():\n if is_parent:\n yield Button(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]),\n attrs={'title': _(\"Add a child page to '{0}' \").format(page.get_admin_display_title())}, classes={'button', 'button-small', 'bicolor', 'icon', 'white', 'icon-plus'}, priority=40)\n else:\n yield PageListingButton(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]),\n attrs={'title': _(\"Add a child page to '{0}' \").format(page.get_admin_display_title())}, priority=40)\n\n yield ButtonWithDropdownFromHook(\n _('More'),\n hook_name='register_page_listing_more_buttons',\n page=page,\n page_perms=page_perms,\n is_parent=is_parent,\n attrs={'target': '_blank', 'title': _('View more options')}, priority=50)\n\n\n@hooks.register('register_page_listing_more_buttons')\ndef page_listing_more_buttons(page, page_perms, is_parent=False):\n if page_perms.can_move():\n yield Button(_('Move'), reverse('wagtailadmin_pages:move', args=[page.id]),\n attrs={\"title\": _('Move this page')}, priority=10)\n if not page.is_root():\n yield Button(_('Copy'), reverse('wagtailadmin_pages:copy', args=[page.id]),\n attrs={'title': _('Copy this page')}, priority=20)\n if page_perms.can_delete():\n yield Button(_('Delete'), reverse('wagtailadmin_pages:delete', args=[page.id]),\n attrs={'title': _('Delete this page')}, priority=30)\n if page_perms.can_unpublish():\n yield Button(_('Unpublish'), reverse('wagtailadmin_pages:unpublish', args=[page.id]),\n attrs={'title': _('Unpublish this page')}, priority=40)\n if not 
page.is_root():\n yield Button(_('Revisions'), reverse('wagtailadmin_pages:revisions_index', args=[page.id]),\n attrs={'title': _(\"View this page's revision history\")}, priority=50)\n", "path": "wagtail/wagtailadmin/wagtail_hooks.py"}]}
1740
301
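The wagtail record is a pure labelling fix, but the shape of the page-listing hook is easy to miss inside the escaped JSON. A toy stand-in showing the relabelled actions from the golden diff (the `Button` dataclass below is a sketch, not wagtail's real `PageListingButton`):

```python
from dataclasses import dataclass

@dataclass
class Button:
    """Trimmed stand-in for wagtail's PageListingButton."""
    label: str
    url: str
    title: str
    priority: int

def page_listing_buttons(page_url, draft_url, has_unpublished_changes, live):
    # The golden diff only touches the labels: "Draft" -> "View draft" and
    # "Live" -> "View live", so each action reads as a verb in the listing.
    if has_unpublished_changes:
        yield Button("View draft", draft_url, "Preview draft", priority=20)
    if live and page_url:
        yield Button("View live", page_url, "View live", priority=30)

for button in page_listing_buttons("/news/", "/admin/pages/3/view_draft/", True, True):
    print(button)
```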
gh_patches_debug_14487
rasdani/github-patches
git_diff
svthalia__concrexit-2853
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make API redirects respect query params <!-- Please add the appropriate label for what change should be made: docs: changes to the documentation) refactor: refactoring production code, eg. renaming a variable or rewriting a function test: adding missing tests, refactoring tests; no production code change chore: updating poetry etc; no production code change --> ### Describe the change As seen in #2828 and #2821, some old API endpoints are redirected to their new replacements. This redirection does not propagate any query parameters. It would be nice to pass query parameters along when redirecting to a replacement API endpoint to prevent breaking consumers. ### Motivation #2828 #2821 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `website/partners/api/v2/urls.py` Content: ``` 1 """Partners app API v2 urls.""" 2 from django.urls import path 3 from django.views.generic import RedirectView 4 5 from partners.api.v2.views import ( 6 PartnerDetailView, 7 PartnerListView, 8 VacancyCategoryListView, 9 VacancyDetailView, 10 VacancyListView, 11 ) 12 13 app_name = "partners" 14 15 urlpatterns = [ 16 path( 17 "partners/events/", 18 RedirectView.as_view( 19 pattern_name="api:v2:events:external-events-list", permanent=False 20 ), 21 name="partner-events-list", 22 ), 23 path( 24 "partners/events/<int:pk>/", 25 RedirectView.as_view( 26 pattern_name="api:v2:events:external-event-detail", permanent=False 27 ), 28 name="partner-events-detail", 29 ), 30 path("partners/vacancies/", VacancyListView.as_view(), name="vacancies-list"), 31 path( 32 "partners/vacancies/categories/", 33 VacancyCategoryListView.as_view(), 34 name="vacancy-categories-list", 35 ), 36 path( 37 "partners/vacancies/<int:pk>/", 38 VacancyDetailView.as_view(), 39 name="vacancies-detail", 40 ), 41 path("partners/", PartnerListView.as_view(), name="partners-list"), 42 path("partners/<int:pk>/", PartnerDetailView.as_view(), name="partners-detail"), 43 ] 44 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/website/partners/api/v2/urls.py b/website/partners/api/v2/urls.py --- a/website/partners/api/v2/urls.py +++ b/website/partners/api/v2/urls.py @@ -16,14 +16,18 @@ path( "partners/events/", RedirectView.as_view( - pattern_name="api:v2:events:external-events-list", permanent=False + pattern_name="api:v2:events:external-events-list", + permanent=False, + query_string=True, ), name="partner-events-list", ), path( "partners/events/<int:pk>/", RedirectView.as_view( - pattern_name="api:v2:events:external-event-detail", permanent=False + pattern_name="api:v2:events:external-event-detail", + permanent=False, + query_string=True, ), name="partner-events-detail", ),
{"golden_diff": "diff --git a/website/partners/api/v2/urls.py b/website/partners/api/v2/urls.py\n--- a/website/partners/api/v2/urls.py\n+++ b/website/partners/api/v2/urls.py\n@@ -16,14 +16,18 @@\n path(\n \"partners/events/\",\n RedirectView.as_view(\n- pattern_name=\"api:v2:events:external-events-list\", permanent=False\n+ pattern_name=\"api:v2:events:external-events-list\",\n+ permanent=False,\n+ query_string=True,\n ),\n name=\"partner-events-list\",\n ),\n path(\n \"partners/events/<int:pk>/\",\n RedirectView.as_view(\n- pattern_name=\"api:v2:events:external-event-detail\", permanent=False\n+ pattern_name=\"api:v2:events:external-event-detail\",\n+ permanent=False,\n+ query_string=True,\n ),\n name=\"partner-events-detail\",\n ),\n", "issue": "Make API redirects respect query params\n<!--\n\nPlease add the appropriate label for what change should be made:\ndocs: changes to the documentation)\nrefactor: refactoring production code, eg. renaming a variable or rewriting a function\ntest: adding missing tests, refactoring tests; no production code change\nchore: updating poetry etc; no production code change\n\n-->\n\n### Describe the change\nAs seen in #2828 and #2821, some old API endpoints are redirected to their new replacements. This redirection does not propagate any query parameters. It would be nice to pass query parameters along when redirecting to a replacement API endpoint to prevent breaking consumers.\n\n### Motivation\n#2828 #2821\n", "before_files": [{"content": "\"\"\"Partners app API v2 urls.\"\"\"\nfrom django.urls import path\nfrom django.views.generic import RedirectView\n\nfrom partners.api.v2.views import (\n PartnerDetailView,\n PartnerListView,\n VacancyCategoryListView,\n VacancyDetailView,\n VacancyListView,\n)\n\napp_name = \"partners\"\n\nurlpatterns = [\n path(\n \"partners/events/\",\n RedirectView.as_view(\n pattern_name=\"api:v2:events:external-events-list\", permanent=False\n ),\n name=\"partner-events-list\",\n ),\n path(\n \"partners/events/<int:pk>/\",\n RedirectView.as_view(\n pattern_name=\"api:v2:events:external-event-detail\", permanent=False\n ),\n name=\"partner-events-detail\",\n ),\n path(\"partners/vacancies/\", VacancyListView.as_view(), name=\"vacancies-list\"),\n path(\n \"partners/vacancies/categories/\",\n VacancyCategoryListView.as_view(),\n name=\"vacancy-categories-list\",\n ),\n path(\n \"partners/vacancies/<int:pk>/\",\n VacancyDetailView.as_view(),\n name=\"vacancies-detail\",\n ),\n path(\"partners/\", PartnerListView.as_view(), name=\"partners-list\"),\n path(\"partners/<int:pk>/\", PartnerDetailView.as_view(), name=\"partners-detail\"),\n]\n", "path": "website/partners/api/v2/urls.py"}], "after_files": [{"content": "\"\"\"Partners app API v2 urls.\"\"\"\nfrom django.urls import path\nfrom django.views.generic import RedirectView\n\nfrom partners.api.v2.views import (\n PartnerDetailView,\n PartnerListView,\n VacancyCategoryListView,\n VacancyDetailView,\n VacancyListView,\n)\n\napp_name = \"partners\"\n\nurlpatterns = [\n path(\n \"partners/events/\",\n RedirectView.as_view(\n pattern_name=\"api:v2:events:external-events-list\",\n permanent=False,\n query_string=True,\n ),\n name=\"partner-events-list\",\n ),\n path(\n \"partners/events/<int:pk>/\",\n RedirectView.as_view(\n pattern_name=\"api:v2:events:external-event-detail\",\n permanent=False,\n query_string=True,\n ),\n name=\"partner-events-detail\",\n ),\n path(\"partners/vacancies/\", VacancyListView.as_view(), name=\"vacancies-list\"),\n path(\n 
\"partners/vacancies/categories/\",\n VacancyCategoryListView.as_view(),\n name=\"vacancy-categories-list\",\n ),\n path(\n \"partners/vacancies/<int:pk>/\",\n VacancyDetailView.as_view(),\n name=\"vacancies-detail\",\n ),\n path(\"partners/\", PartnerListView.as_view(), name=\"partners-list\"),\n path(\"partners/<int:pk>/\", PartnerDetailView.as_view(), name=\"partners-detail\"),\n]\n", "path": "website/partners/api/v2/urls.py"}]}
763
208
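The concrexit fix comes down to one Django flag. As a fragment of the record's `urls.py` (it assumes the surrounding Django project and the `api:v2:events` namespace from the repository):

```python
from django.urls import path
from django.views.generic import RedirectView

urlpatterns = [
    path(
        "partners/events/",
        RedirectView.as_view(
            pattern_name="api:v2:events:external-events-list",
            permanent=False,
            # query_string=True is the entire fix: without it Django's
            # RedirectView silently drops ?limit=, ?page=, etc. from the 302.
            query_string=True,
        ),
        name="partner-events-list",
    ),
]
```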
gh_patches_debug_8139
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-5957
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Improve msg error for submodules with invalid url schemas We only support `http` schemas, but the error message isn't clear about that. Here is a good suggestion: https://github.com/readthedocs/readthedocs.org/issues/5928#issuecomment-511708197 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `readthedocs/projects/exceptions.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 """Project exceptions.""" 4 5 from django.conf import settings 6 from django.utils.translation import ugettext_noop as _ 7 8 from readthedocs.doc_builder.exceptions import BuildEnvironmentError 9 10 11 class ProjectConfigurationError(BuildEnvironmentError): 12 13 """Error raised trying to configure a project for build.""" 14 15 NOT_FOUND = _( 16 'A configuration file was not found. ' 17 'Make sure you have a conf.py file in your repository.', 18 ) 19 20 MULTIPLE_CONF_FILES = _( 21 'We found more than one conf.py and are not sure which one to use. ' 22 'Please, specify the correct file under the Advanced settings tab ' 23 "in the project's Admin.", 24 ) 25 26 27 class RepositoryError(BuildEnvironmentError): 28 29 """Failure during repository operation.""" 30 31 PRIVATE_ALLOWED = _( 32 'There was a problem connecting to your repository, ' 33 'ensure that your repository URL is correct.', 34 ) 35 PRIVATE_NOT_ALLOWED = _( 36 'There was a problem connecting to your repository, ' 37 'ensure that your repository URL is correct and your repository is public. ' 38 'Private repositories are not supported.', 39 ) 40 41 INVALID_SUBMODULES = _('One or more submodule URLs are not valid: {}.') 42 INVALID_SUBMODULES_PATH = _( 43 'One or more submodule paths are not valid. ' 44 'Check that all your submodules in .gitmodules are used.' 45 ) 46 47 DUPLICATED_RESERVED_VERSIONS = _( 48 'You can not have two versions with the name latest or stable.', 49 ) 50 51 FAILED_TO_CHECKOUT = _('Failed to checkout revision: {}') 52 53 def get_default_message(self): 54 if settings.ALLOW_PRIVATE_REPOS: 55 return self.PRIVATE_ALLOWED 56 return self.PRIVATE_NOT_ALLOWED 57 58 59 class ProjectSpamError(Exception): 60 61 """ 62 Error raised when a project field has detected spam. 63 64 This error is not raised to users, we use this for banning users in the 65 background. 66 """ 67 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/readthedocs/projects/exceptions.py b/readthedocs/projects/exceptions.py --- a/readthedocs/projects/exceptions.py +++ b/readthedocs/projects/exceptions.py @@ -38,7 +38,10 @@ 'Private repositories are not supported.', ) - INVALID_SUBMODULES = _('One or more submodule URLs are not valid: {}.') + INVALID_SUBMODULES = _( + 'One or more submodule URLs are not valid: {}, ' + 'git/ssh URL schemas for submodules are not supported.' + ) INVALID_SUBMODULES_PATH = _( 'One or more submodule paths are not valid. ' 'Check that all your submodules in .gitmodules are used.'
{"golden_diff": "diff --git a/readthedocs/projects/exceptions.py b/readthedocs/projects/exceptions.py\n--- a/readthedocs/projects/exceptions.py\n+++ b/readthedocs/projects/exceptions.py\n@@ -38,7 +38,10 @@\n 'Private repositories are not supported.',\n )\n \n- INVALID_SUBMODULES = _('One or more submodule URLs are not valid: {}.')\n+ INVALID_SUBMODULES = _(\n+ 'One or more submodule URLs are not valid: {}, '\n+ 'git/ssh URL schemas for submodules are not supported.'\n+ )\n INVALID_SUBMODULES_PATH = _(\n 'One or more submodule paths are not valid. '\n 'Check that all your submodules in .gitmodules are used.'\n", "issue": "Improve msg error for submodules with invalid url schemas\nWe only support `http` schemas, but the error message isn't clear about that. Here is a good suggestion: https://github.com/readthedocs/readthedocs.org/issues/5928#issuecomment-511708197\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Project exceptions.\"\"\"\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_noop as _\n\nfrom readthedocs.doc_builder.exceptions import BuildEnvironmentError\n\n\nclass ProjectConfigurationError(BuildEnvironmentError):\n\n \"\"\"Error raised trying to configure a project for build.\"\"\"\n\n NOT_FOUND = _(\n 'A configuration file was not found. '\n 'Make sure you have a conf.py file in your repository.',\n )\n\n MULTIPLE_CONF_FILES = _(\n 'We found more than one conf.py and are not sure which one to use. '\n 'Please, specify the correct file under the Advanced settings tab '\n \"in the project's Admin.\",\n )\n\n\nclass RepositoryError(BuildEnvironmentError):\n\n \"\"\"Failure during repository operation.\"\"\"\n\n PRIVATE_ALLOWED = _(\n 'There was a problem connecting to your repository, '\n 'ensure that your repository URL is correct.',\n )\n PRIVATE_NOT_ALLOWED = _(\n 'There was a problem connecting to your repository, '\n 'ensure that your repository URL is correct and your repository is public. '\n 'Private repositories are not supported.',\n )\n\n INVALID_SUBMODULES = _('One or more submodule URLs are not valid: {}.')\n INVALID_SUBMODULES_PATH = _(\n 'One or more submodule paths are not valid. '\n 'Check that all your submodules in .gitmodules are used.'\n )\n\n DUPLICATED_RESERVED_VERSIONS = _(\n 'You can not have two versions with the name latest or stable.',\n )\n\n FAILED_TO_CHECKOUT = _('Failed to checkout revision: {}')\n\n def get_default_message(self):\n if settings.ALLOW_PRIVATE_REPOS:\n return self.PRIVATE_ALLOWED\n return self.PRIVATE_NOT_ALLOWED\n\n\nclass ProjectSpamError(Exception):\n\n \"\"\"\n Error raised when a project field has detected spam.\n\n This error is not raised to users, we use this for banning users in the\n background.\n \"\"\"\n", "path": "readthedocs/projects/exceptions.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Project exceptions.\"\"\"\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_noop as _\n\nfrom readthedocs.doc_builder.exceptions import BuildEnvironmentError\n\n\nclass ProjectConfigurationError(BuildEnvironmentError):\n\n \"\"\"Error raised trying to configure a project for build.\"\"\"\n\n NOT_FOUND = _(\n 'A configuration file was not found. '\n 'Make sure you have a conf.py file in your repository.',\n )\n\n MULTIPLE_CONF_FILES = _(\n 'We found more than one conf.py and are not sure which one to use. 
'\n 'Please, specify the correct file under the Advanced settings tab '\n \"in the project's Admin.\",\n )\n\n\nclass RepositoryError(BuildEnvironmentError):\n\n \"\"\"Failure during repository operation.\"\"\"\n\n PRIVATE_ALLOWED = _(\n 'There was a problem connecting to your repository, '\n 'ensure that your repository URL is correct.',\n )\n PRIVATE_NOT_ALLOWED = _(\n 'There was a problem connecting to your repository, '\n 'ensure that your repository URL is correct and your repository is public. '\n 'Private repositories are not supported.',\n )\n\n INVALID_SUBMODULES = _(\n 'One or more submodule URLs are not valid: {}, '\n 'git/ssh URL schemas for submodules are not supported.'\n )\n INVALID_SUBMODULES_PATH = _(\n 'One or more submodule paths are not valid. '\n 'Check that all your submodules in .gitmodules are used.'\n )\n\n DUPLICATED_RESERVED_VERSIONS = _(\n 'You can not have two versions with the name latest or stable.',\n )\n\n FAILED_TO_CHECKOUT = _('Failed to checkout revision: {}')\n\n def get_default_message(self):\n if settings.ALLOW_PRIVATE_REPOS:\n return self.PRIVATE_ALLOWED\n return self.PRIVATE_NOT_ALLOWED\n\n\nclass ProjectSpamError(Exception):\n\n \"\"\"\n Error raised when a project field has detected spam.\n\n This error is not raised to users, we use this for banning users in the\n background.\n \"\"\"\n", "path": "readthedocs/projects/exceptions.py"}]}
877
160
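The readthedocs change is only to a message template, but it is worth seeing filled in. A sketch with hypothetical offending URLs (the two submodule addresses below are invented for illustration):

```python
INVALID_SUBMODULES = (
    "One or more submodule URLs are not valid: {}, "
    "git/ssh URL schemas for submodules are not supported."
)

# The placeholder receives the offending URLs, so users see exactly which
# .gitmodules entries need rewriting to an https:// form.
bad_urls = ["git@github.com:org/repo.git", "ssh://git@host/org/repo.git"]
print(INVALID_SUBMODULES.format(", ".join(bad_urls)))
```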
gh_patches_debug_16298
rasdani/github-patches
git_diff
getsentry__sentry-python-612
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- DeprecationWarning in sqlalchemy integration At integrations/sqlalchemy.py line 28 - integration is listening for dbapi_error, this was deprecated in sqlalchemy a long time ago and should be replaced with handle_error event --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sentry_sdk/integrations/sqlalchemy.py` Content: ``` 1 from __future__ import absolute_import 2 3 from sentry_sdk._types import MYPY 4 from sentry_sdk.hub import Hub 5 from sentry_sdk.integrations import Integration 6 from sentry_sdk.tracing import record_sql_queries 7 8 from sqlalchemy.engine import Engine # type: ignore 9 from sqlalchemy.event import listen # type: ignore 10 11 if MYPY: 12 from typing import Any 13 from typing import ContextManager 14 from typing import Optional 15 16 from sentry_sdk.tracing import Span 17 18 19 class SqlalchemyIntegration(Integration): 20 identifier = "sqlalchemy" 21 22 @staticmethod 23 def setup_once(): 24 # type: () -> None 25 26 listen(Engine, "before_cursor_execute", _before_cursor_execute) 27 listen(Engine, "after_cursor_execute", _after_cursor_execute) 28 listen(Engine, "dbapi_error", _dbapi_error) 29 30 31 def _before_cursor_execute( 32 conn, cursor, statement, parameters, context, executemany, *args 33 ): 34 # type: (Any, Any, Any, Any, Any, bool, *Any) -> None 35 hub = Hub.current 36 if hub.get_integration(SqlalchemyIntegration) is None: 37 return 38 39 ctx_mgr = record_sql_queries( 40 hub, 41 cursor, 42 statement, 43 parameters, 44 paramstyle=context and context.dialect and context.dialect.paramstyle or None, 45 executemany=executemany, 46 ) 47 conn._sentry_sql_span_manager = ctx_mgr 48 49 span = ctx_mgr.__enter__() 50 51 if span is not None: 52 conn._sentry_sql_span = span 53 54 55 def _after_cursor_execute(conn, cursor, statement, *args): 56 # type: (Any, Any, Any, *Any) -> None 57 ctx_mgr = getattr( 58 conn, "_sentry_sql_span_manager", None 59 ) # type: ContextManager[Any] 60 61 if ctx_mgr is not None: 62 conn._sentry_sql_span_manager = None 63 ctx_mgr.__exit__(None, None, None) 64 65 66 def _dbapi_error(conn, *args): 67 # type: (Any, *Any) -> None 68 span = getattr(conn, "_sentry_sql_span", None) # type: Optional[Span] 69 70 if span is not None: 71 span.set_status("internal_error") 72 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sentry_sdk/integrations/sqlalchemy.py b/sentry_sdk/integrations/sqlalchemy.py --- a/sentry_sdk/integrations/sqlalchemy.py +++ b/sentry_sdk/integrations/sqlalchemy.py @@ -25,7 +25,7 @@ listen(Engine, "before_cursor_execute", _before_cursor_execute) listen(Engine, "after_cursor_execute", _after_cursor_execute) - listen(Engine, "dbapi_error", _dbapi_error) + listen(Engine, "handle_error", _handle_error) def _before_cursor_execute( @@ -63,8 +63,9 @@ ctx_mgr.__exit__(None, None, None) -def _dbapi_error(conn, *args): +def _handle_error(context, *args): # type: (Any, *Any) -> None + conn = context.connection span = getattr(conn, "_sentry_sql_span", None) # type: Optional[Span] if span is not None:
{"golden_diff": "diff --git a/sentry_sdk/integrations/sqlalchemy.py b/sentry_sdk/integrations/sqlalchemy.py\n--- a/sentry_sdk/integrations/sqlalchemy.py\n+++ b/sentry_sdk/integrations/sqlalchemy.py\n@@ -25,7 +25,7 @@\n \n listen(Engine, \"before_cursor_execute\", _before_cursor_execute)\n listen(Engine, \"after_cursor_execute\", _after_cursor_execute)\n- listen(Engine, \"dbapi_error\", _dbapi_error)\n+ listen(Engine, \"handle_error\", _handle_error)\n \n \n def _before_cursor_execute(\n@@ -63,8 +63,9 @@\n ctx_mgr.__exit__(None, None, None)\n \n \n-def _dbapi_error(conn, *args):\n+def _handle_error(context, *args):\n # type: (Any, *Any) -> None\n+ conn = context.connection\n span = getattr(conn, \"_sentry_sql_span\", None) # type: Optional[Span]\n \n if span is not None:\n", "issue": "DeprecationWarning in sqlalchemy integration\nAt integrations/sqlalchemy.py line 28 - integration is listening for dbapi_error, this was deprecated in sqlalchemy a long time ago and should be replaced with handle_error event\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.tracing import record_sql_queries\n\nfrom sqlalchemy.engine import Engine # type: ignore\nfrom sqlalchemy.event import listen # type: ignore\n\nif MYPY:\n from typing import Any\n from typing import ContextManager\n from typing import Optional\n\n from sentry_sdk.tracing import Span\n\n\nclass SqlalchemyIntegration(Integration):\n identifier = \"sqlalchemy\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n\n listen(Engine, \"before_cursor_execute\", _before_cursor_execute)\n listen(Engine, \"after_cursor_execute\", _after_cursor_execute)\n listen(Engine, \"dbapi_error\", _dbapi_error)\n\n\ndef _before_cursor_execute(\n conn, cursor, statement, parameters, context, executemany, *args\n):\n # type: (Any, Any, Any, Any, Any, bool, *Any) -> None\n hub = Hub.current\n if hub.get_integration(SqlalchemyIntegration) is None:\n return\n\n ctx_mgr = record_sql_queries(\n hub,\n cursor,\n statement,\n parameters,\n paramstyle=context and context.dialect and context.dialect.paramstyle or None,\n executemany=executemany,\n )\n conn._sentry_sql_span_manager = ctx_mgr\n\n span = ctx_mgr.__enter__()\n\n if span is not None:\n conn._sentry_sql_span = span\n\n\ndef _after_cursor_execute(conn, cursor, statement, *args):\n # type: (Any, Any, Any, *Any) -> None\n ctx_mgr = getattr(\n conn, \"_sentry_sql_span_manager\", None\n ) # type: ContextManager[Any]\n\n if ctx_mgr is not None:\n conn._sentry_sql_span_manager = None\n ctx_mgr.__exit__(None, None, None)\n\n\ndef _dbapi_error(conn, *args):\n # type: (Any, *Any) -> None\n span = getattr(conn, \"_sentry_sql_span\", None) # type: Optional[Span]\n\n if span is not None:\n span.set_status(\"internal_error\")\n", "path": "sentry_sdk/integrations/sqlalchemy.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.tracing import record_sql_queries\n\nfrom sqlalchemy.engine import Engine # type: ignore\nfrom sqlalchemy.event import listen # type: ignore\n\nif MYPY:\n from typing import Any\n from typing import ContextManager\n from typing import Optional\n\n from sentry_sdk.tracing import Span\n\n\nclass SqlalchemyIntegration(Integration):\n identifier = \"sqlalchemy\"\n\n @staticmethod\n def 
setup_once():\n # type: () -> None\n\n listen(Engine, \"before_cursor_execute\", _before_cursor_execute)\n listen(Engine, \"after_cursor_execute\", _after_cursor_execute)\n listen(Engine, \"handle_error\", _handle_error)\n\n\ndef _before_cursor_execute(\n conn, cursor, statement, parameters, context, executemany, *args\n):\n # type: (Any, Any, Any, Any, Any, bool, *Any) -> None\n hub = Hub.current\n if hub.get_integration(SqlalchemyIntegration) is None:\n return\n\n ctx_mgr = record_sql_queries(\n hub,\n cursor,\n statement,\n parameters,\n paramstyle=context and context.dialect and context.dialect.paramstyle or None,\n executemany=executemany,\n )\n conn._sentry_sql_span_manager = ctx_mgr\n\n span = ctx_mgr.__enter__()\n\n if span is not None:\n conn._sentry_sql_span = span\n\n\ndef _after_cursor_execute(conn, cursor, statement, *args):\n # type: (Any, Any, Any, *Any) -> None\n ctx_mgr = getattr(\n conn, \"_sentry_sql_span_manager\", None\n ) # type: ContextManager[Any]\n\n if ctx_mgr is not None:\n conn._sentry_sql_span_manager = None\n ctx_mgr.__exit__(None, None, None)\n\n\ndef _handle_error(context, *args):\n # type: (Any, *Any) -> None\n conn = context.connection\n span = getattr(conn, \"_sentry_sql_span\", None) # type: Optional[Span]\n\n if span is not None:\n span.set_status(\"internal_error\")\n", "path": "sentry_sdk/integrations/sqlalchemy.py"}]}
948
223
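In the sentry-python record, the subtle part is the changed handler signature: SQLAlchemy's `handle_error` event delivers an `ExceptionContext`, not the connection that the deprecated `dbapi_error` hook passed. The relevant wiring, condensed from the golden diff (it assumes `sqlalchemy` is installed and that the earlier hooks stashed the span on the connection):

```python
from sqlalchemy.engine import Engine
from sqlalchemy.event import listen

def _handle_error(context, *args):
    # handle_error receives an ExceptionContext; the connection (where the
    # in-flight span was stashed by _before_cursor_execute) hangs off it.
    conn = context.connection
    span = getattr(conn, "_sentry_sql_span", None)
    if span is not None:
        span.set_status("internal_error")

# "handle_error" replaces the long-deprecated "dbapi_error" event name.
listen(Engine, "handle_error", _handle_error)
```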
gh_patches_debug_1197
rasdani/github-patches
git_diff
keras-team__keras-nlp-1166
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add compute_output_shape method to WordPieceTokenizer When we run Pretraining Transformer from Scratch guide with PyTorch and JAX backend, it raises ``` RuntimeError: Exception encountered when calling WordPieceTokenizer.call(). Could not automatically infer the output shape / dtype of 'word_piece_tokenizer_1' (of type WordPieceTokenizer). Either the `WordPieceTokenizer.call()` method is incorrect, or you need to implement the `WordPieceTokenizer.compute_output_spec() / compute_output_shape()` method. Error encountered: 'string' Arguments received by WordPieceTokenizer.call(): • args=('<KerasTensor shape=(None,), dtype=string, name=keras_tensor_59>',) • kwargs=<class 'inspect._empty'> ``` cc: @mattdangerw --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `keras_nlp/tokenizers/tokenizer.py` Content: ``` 1 # Copyright 2023 The KerasNLP Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from typing import List 16 17 from keras_nlp.api_export import keras_nlp_export 18 from keras_nlp.layers.preprocessing.preprocessing_layer import ( 19 PreprocessingLayer, 20 ) 21 22 23 @keras_nlp_export("keras_nlp.tokenizers.Tokenizer") 24 class Tokenizer(PreprocessingLayer): 25 """A base class for tokenizer layers. 26 27 Tokenizers in the KerasNLP library should all subclass this layer. 28 The class provides two core methods `tokenize()` and `detokenize()` for 29 going from plain text to sequences and back. A tokenizer is a subclass of 30 `keras.layers.Layer` and can be combined into a `keras.Model`. 31 32 Subclassers should always implement the `tokenize()` method, which will also 33 be the default when calling the layer directly on inputs. 34 35 Subclassers can optionally implement the `detokenize()` method if the 36 tokenization is reversible. Otherwise, this can be skipped. 37 38 Subclassers should implement `get_vocabulary()`, `vocabulary_size()`, 39 `token_to_id()` and `id_to_token()` if applicable. For some simple 40 "vocab free" tokenizers, such as a whitespace splitter show below, these 41 methods do not apply and can be skipped. 42 43 Examples: 44 45 ```python 46 class WhitespaceSplitterTokenizer(keras_nlp.tokenizers.Tokenizer): 47 def tokenize(self, inputs): 48 return tf.strings.split(inputs) 49 50 def detokenize(self, inputs): 51 return tf.strings.reduce_join(inputs, separator=" ", axis=-1) 52 53 tokenizer = WhitespaceSplitterTokenizer() 54 55 # Tokenize some inputs. 56 tokenizer.tokenize("This is a test") 57 58 # Shorthard for `tokenize()`. 59 tokenizer("This is a test") 60 61 # Detokenize some outputs. 62 tokenizer.detokenize(["This", "is", "a", "test"]) 63 ``` 64 """ 65 66 def __init__(self, *args, **kwargs): 67 super().__init__(*args, **kwargs) 68 69 def tokenize(self, inputs, *args, **kwargs): 70 """Transform input tensors of strings into output tokens. 
71 72 Args: 73 inputs: Input tensor, or dict/list/tuple of input tensors. 74 *args: Additional positional arguments. 75 **kwargs: Additional keyword arguments. 76 """ 77 raise NotImplementedError( 78 "No implementation of `tokenize()` was found for " 79 f"{self.__class__.__name__}. All tokenizers should implement " 80 "`tokenize()`." 81 ) 82 83 def detokenize(self, inputs, *args, **kwargs): 84 """Transform tokens back into strings. 85 86 Args: 87 inputs: Input tensor, or dict/list/tuple of input tensors. 88 *args: Additional positional arguments. 89 **kwargs: Additional keyword arguments. 90 """ 91 raise NotImplementedError( 92 "No implementation of `detokenize()` was found for " 93 f"{self.__class__.__name__}." 94 ) 95 96 def get_vocabulary(self) -> List[str]: 97 """Get the tokenizer vocabulary as a list of strings terms.""" 98 raise NotImplementedError( 99 "No implementation of `get_vocabulary()` was found for " 100 f"{self.__class__.__name__}." 101 ) 102 103 def vocabulary_size(self) -> int: 104 """Returns the total size of the token id space.""" 105 raise NotImplementedError( 106 "No implementation of `vocabulary_size()` was found for " 107 f"{self.__class__.__name__}." 108 ) 109 110 def id_to_token(self, id: int) -> str: 111 """Convert an integer id to a string token.""" 112 raise NotImplementedError( 113 "No implementation of `id_to_token()` was found for " 114 f"{self.__class__.__name__}." 115 ) 116 117 def token_to_id(self, token: str) -> int: 118 """Convert an integer id to a string token.""" 119 raise NotImplementedError( 120 "No implementation of `id_to_token()` was found for " 121 f"{self.__class__.__name__}." 122 ) 123 124 def call(self, inputs, *args, training=None, **kwargs): 125 return self.tokenize(inputs, *args, **kwargs) 126 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/keras_nlp/tokenizers/tokenizer.py b/keras_nlp/tokenizers/tokenizer.py --- a/keras_nlp/tokenizers/tokenizer.py +++ b/keras_nlp/tokenizers/tokenizer.py @@ -123,3 +123,6 @@ def call(self, inputs, *args, training=None, **kwargs): return self.tokenize(inputs, *args, **kwargs) + + def compute_output_shape(self, inputs_shape): + return tuple(inputs_shape) + (self.sequence_length,)
{"golden_diff": "diff --git a/keras_nlp/tokenizers/tokenizer.py b/keras_nlp/tokenizers/tokenizer.py\n--- a/keras_nlp/tokenizers/tokenizer.py\n+++ b/keras_nlp/tokenizers/tokenizer.py\n@@ -123,3 +123,6 @@\n \n def call(self, inputs, *args, training=None, **kwargs):\n return self.tokenize(inputs, *args, **kwargs)\n+\n+ def compute_output_shape(self, inputs_shape):\n+ return tuple(inputs_shape) + (self.sequence_length,)\n", "issue": "Add compute_output_shape method to WordPieceTokenizer\nWhen we run Pretraining Transformer from Scratch guide with PyTorch and JAX backend, it raises\r\n\r\n```\r\n\r\nRuntimeError: Exception encountered when calling WordPieceTokenizer.call().\r\n\r\nCould not automatically infer the output shape / dtype of 'word_piece_tokenizer_1' (of type WordPieceTokenizer). Either the `WordPieceTokenizer.call()` method is incorrect, or you need to implement the `WordPieceTokenizer.compute_output_spec() / compute_output_shape()` method. Error encountered:\r\n\r\n'string'\r\n\r\nArguments received by WordPieceTokenizer.call():\r\n \u2022 args=('<KerasTensor shape=(None,), dtype=string, name=keras_tensor_59>',)\r\n \u2022 kwargs=<class 'inspect._empty'>\r\n```\r\n\r\ncc: @mattdangerw \n", "before_files": [{"content": "# Copyright 2023 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List\n\nfrom keras_nlp.api_export import keras_nlp_export\nfrom keras_nlp.layers.preprocessing.preprocessing_layer import (\n PreprocessingLayer,\n)\n\n\n@keras_nlp_export(\"keras_nlp.tokenizers.Tokenizer\")\nclass Tokenizer(PreprocessingLayer):\n \"\"\"A base class for tokenizer layers.\n\n Tokenizers in the KerasNLP library should all subclass this layer.\n The class provides two core methods `tokenize()` and `detokenize()` for\n going from plain text to sequences and back. A tokenizer is a subclass of\n `keras.layers.Layer` and can be combined into a `keras.Model`.\n\n Subclassers should always implement the `tokenize()` method, which will also\n be the default when calling the layer directly on inputs.\n\n Subclassers can optionally implement the `detokenize()` method if the\n tokenization is reversible. Otherwise, this can be skipped.\n\n Subclassers should implement `get_vocabulary()`, `vocabulary_size()`,\n `token_to_id()` and `id_to_token()` if applicable. 
For some simple\n \"vocab free\" tokenizers, such as a whitespace splitter show below, these\n methods do not apply and can be skipped.\n\n Examples:\n\n ```python\n class WhitespaceSplitterTokenizer(keras_nlp.tokenizers.Tokenizer):\n def tokenize(self, inputs):\n return tf.strings.split(inputs)\n\n def detokenize(self, inputs):\n return tf.strings.reduce_join(inputs, separator=\" \", axis=-1)\n\n tokenizer = WhitespaceSplitterTokenizer()\n\n # Tokenize some inputs.\n tokenizer.tokenize(\"This is a test\")\n\n # Shorthard for `tokenize()`.\n tokenizer(\"This is a test\")\n\n # Detokenize some outputs.\n tokenizer.detokenize([\"This\", \"is\", \"a\", \"test\"])\n ```\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def tokenize(self, inputs, *args, **kwargs):\n \"\"\"Transform input tensors of strings into output tokens.\n\n Args:\n inputs: Input tensor, or dict/list/tuple of input tensors.\n *args: Additional positional arguments.\n **kwargs: Additional keyword arguments.\n \"\"\"\n raise NotImplementedError(\n \"No implementation of `tokenize()` was found for \"\n f\"{self.__class__.__name__}. All tokenizers should implement \"\n \"`tokenize()`.\"\n )\n\n def detokenize(self, inputs, *args, **kwargs):\n \"\"\"Transform tokens back into strings.\n\n Args:\n inputs: Input tensor, or dict/list/tuple of input tensors.\n *args: Additional positional arguments.\n **kwargs: Additional keyword arguments.\n \"\"\"\n raise NotImplementedError(\n \"No implementation of `detokenize()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def get_vocabulary(self) -> List[str]:\n \"\"\"Get the tokenizer vocabulary as a list of strings terms.\"\"\"\n raise NotImplementedError(\n \"No implementation of `get_vocabulary()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def vocabulary_size(self) -> int:\n \"\"\"Returns the total size of the token id space.\"\"\"\n raise NotImplementedError(\n \"No implementation of `vocabulary_size()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def id_to_token(self, id: int) -> str:\n \"\"\"Convert an integer id to a string token.\"\"\"\n raise NotImplementedError(\n \"No implementation of `id_to_token()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def token_to_id(self, token: str) -> int:\n \"\"\"Convert an integer id to a string token.\"\"\"\n raise NotImplementedError(\n \"No implementation of `id_to_token()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def call(self, inputs, *args, training=None, **kwargs):\n return self.tokenize(inputs, *args, **kwargs)\n", "path": "keras_nlp/tokenizers/tokenizer.py"}], "after_files": [{"content": "# Copyright 2023 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List\n\nfrom keras_nlp.api_export import keras_nlp_export\nfrom keras_nlp.layers.preprocessing.preprocessing_layer import (\n PreprocessingLayer,\n)\n\n\n@keras_nlp_export(\"keras_nlp.tokenizers.Tokenizer\")\nclass 
Tokenizer(PreprocessingLayer):\n \"\"\"A base class for tokenizer layers.\n\n Tokenizers in the KerasNLP library should all subclass this layer.\n The class provides two core methods `tokenize()` and `detokenize()` for\n going from plain text to sequences and back. A tokenizer is a subclass of\n `keras.layers.Layer` and can be combined into a `keras.Model`.\n\n Subclassers should always implement the `tokenize()` method, which will also\n be the default when calling the layer directly on inputs.\n\n Subclassers can optionally implement the `detokenize()` method if the\n tokenization is reversible. Otherwise, this can be skipped.\n\n Subclassers should implement `get_vocabulary()`, `vocabulary_size()`,\n `token_to_id()` and `id_to_token()` if applicable. For some simple\n \"vocab free\" tokenizers, such as a whitespace splitter show below, these\n methods do not apply and can be skipped.\n\n Examples:\n\n ```python\n class WhitespaceSplitterTokenizer(keras_nlp.tokenizers.Tokenizer):\n def tokenize(self, inputs):\n return tf.strings.split(inputs)\n\n def detokenize(self, inputs):\n return tf.strings.reduce_join(inputs, separator=\" \", axis=-1)\n\n tokenizer = WhitespaceSplitterTokenizer()\n\n # Tokenize some inputs.\n tokenizer.tokenize(\"This is a test\")\n\n # Shorthard for `tokenize()`.\n tokenizer(\"This is a test\")\n\n # Detokenize some outputs.\n tokenizer.detokenize([\"This\", \"is\", \"a\", \"test\"])\n ```\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def tokenize(self, inputs, *args, **kwargs):\n \"\"\"Transform input tensors of strings into output tokens.\n\n Args:\n inputs: Input tensor, or dict/list/tuple of input tensors.\n *args: Additional positional arguments.\n **kwargs: Additional keyword arguments.\n \"\"\"\n raise NotImplementedError(\n \"No implementation of `tokenize()` was found for \"\n f\"{self.__class__.__name__}. All tokenizers should implement \"\n \"`tokenize()`.\"\n )\n\n def detokenize(self, inputs, *args, **kwargs):\n \"\"\"Transform tokens back into strings.\n\n Args:\n inputs: Input tensor, or dict/list/tuple of input tensors.\n *args: Additional positional arguments.\n **kwargs: Additional keyword arguments.\n \"\"\"\n raise NotImplementedError(\n \"No implementation of `detokenize()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def get_vocabulary(self) -> List[str]:\n \"\"\"Get the tokenizer vocabulary as a list of strings terms.\"\"\"\n raise NotImplementedError(\n \"No implementation of `get_vocabulary()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def vocabulary_size(self) -> int:\n \"\"\"Returns the total size of the token id space.\"\"\"\n raise NotImplementedError(\n \"No implementation of `vocabulary_size()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def id_to_token(self, id: int) -> str:\n \"\"\"Convert an integer id to a string token.\"\"\"\n raise NotImplementedError(\n \"No implementation of `id_to_token()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def token_to_id(self, token: str) -> int:\n \"\"\"Convert an integer id to a string token.\"\"\"\n raise NotImplementedError(\n \"No implementation of `id_to_token()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def call(self, inputs, *args, training=None, **kwargs):\n return self.tokenize(inputs, *args, **kwargs)\n\n def compute_output_shape(self, inputs_shape):\n return tuple(inputs_shape) + (self.sequence_length,)\n", "path": "keras_nlp/tokenizers/tokenizer.py"}]}
1712
120
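The golden diff in the record above extends the `Tokenizer` base class with a `compute_output_shape` method that appends a sequence axis to the incoming shape. A minimal, framework-free sketch of that pattern (the class name and the `sequence_length` attribute here are illustrative, not part of the keras_nlp API):

```python
# Sketch of the shape rule the patch introduces: output shape is the
# input shape with one extra `sequence_length` dimension appended.
class ShapeAwareTokenizer:
    def __init__(self, sequence_length):
        self.sequence_length = sequence_length  # assumed to be set by subclasses

    def compute_output_shape(self, inputs_shape):
        return tuple(inputs_shape) + (self.sequence_length,)

print(ShapeAwareTokenizer(128).compute_output_shape((32,)))  # -> (32, 128)
```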
gh_patches_debug_9283
rasdani/github-patches
git_diff
locustio__locust-1918
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- the report cant show the right time <!-- If you have a general question about how to use Locust, please check Stack Overflow first https://stackoverflow.com/questions/tagged/locust You can also ask new questions on SO, https://stackoverflow.com/questions/ask just remember to tag your question with "locust". Do not immediately post your issue here after posting to SO, wait for an answer there instead. Use this form only for reporting actual bugs in Locust. Remember, the developers of Locust are unpaid volunteers, so make sure you have tried everything you can think of before filing a bug :) Always make sure you are running an up to date Locust version (pip3 install -U locust) --> ### Describe the bug <!-- A clear and concise description of what the bug is --> ![image](https://user-images.githubusercontent.com/47803360/138236838-6d62d47c-aa4d-4de7-b714-d94b7dff639a.png) ![image](https://user-images.githubusercontent.com/47803360/138236861-d318f852-da06-41e9-abcb-fa18d47166c7.png) this picture is right time i am in china ### Expected behavior <!-- Tell us what you think should happen --> Wrong time zone reported ### Actual behavior <!-- Tell us what happens instead. Include screenshots if this an issue with the GUI. --> ### Steps to reproduce <!-- Please provide a minimal reproducible code example (https://stackoverflow.com/help/minimal-reproducible-example) --> ### Environment - OS: - Python version: - Locust version: (please dont file issues for anything but the most recent release or prerelease builds) - Locust command line that you ran: - Locust file contents (anonymized if necessary): --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `locust/html.py` Content: ``` 1 from jinja2 import Environment, FileSystemLoader 2 import os 3 import pathlib 4 import datetime 5 from itertools import chain 6 from .stats import sort_stats 7 from .user.inspectuser import get_task_ratio_dict 8 from html import escape 9 from json import dumps 10 11 12 def render_template(file, **kwargs): 13 templates_path = os.path.join(pathlib.Path(__file__).parent.absolute(), "templates") 14 env = Environment(loader=FileSystemLoader(templates_path), extensions=["jinja2.ext.do"]) 15 template = env.get_template(file) 16 return template.render(**kwargs) 17 18 19 def get_html_report(environment, show_download_link=True): 20 stats = environment.runner.stats 21 22 start_ts = stats.start_time 23 start_time = datetime.datetime.fromtimestamp(start_ts).strftime("%Y-%m-%d %H:%M:%S") 24 25 end_ts = stats.last_request_timestamp 26 if end_ts: 27 end_time = datetime.datetime.fromtimestamp(end_ts).strftime("%Y-%m-%d %H:%M:%S") 28 else: 29 end_time = start_time 30 31 host = None 32 if environment.host: 33 host = environment.host 34 elif environment.runner.user_classes: 35 all_hosts = set([l.host for l in environment.runner.user_classes]) 36 if len(all_hosts) == 1: 37 host = list(all_hosts)[0] 38 39 requests_statistics = list(chain(sort_stats(stats.entries), [stats.total])) 40 failures_statistics = sort_stats(stats.errors) 41 exceptions_statistics = [ 42 {**exc, "nodes": ", ".join(exc["nodes"])} for exc in environment.runner.exceptions.values() 43 ] 44 45 history = stats.history 46 47 static_js = [] 48 js_files = ["jquery-1.11.3.min.js", "echarts.common.min.js", "vintage.js", "chart.js", "tasks.js"] 49 for js_file in js_files: 50 path = os.path.join(os.path.dirname(__file__), "static", js_file) 51 static_js.append("// " + js_file) 52 with open(path, encoding="utf8") as f: 53 static_js.append(f.read()) 54 static_js.extend(["", ""]) 55 56 static_css = [] 57 css_files = ["tables.css"] 58 for css_file in css_files: 59 path = os.path.join(os.path.dirname(__file__), "static", "css", css_file) 60 static_css.append("/* " + css_file + " */") 61 with open(path, encoding="utf8") as f: 62 static_css.append(f.read()) 63 static_css.extend(["", ""]) 64 65 task_data = { 66 "per_class": get_task_ratio_dict(environment.user_classes), 67 "total": get_task_ratio_dict(environment.user_classes, total=True), 68 } 69 70 res = render_template( 71 "report.html", 72 int=int, 73 round=round, 74 requests_statistics=requests_statistics, 75 failures_statistics=failures_statistics, 76 exceptions_statistics=exceptions_statistics, 77 start_time=start_time, 78 end_time=end_time, 79 host=host, 80 history=history, 81 static_js="\n".join(static_js), 82 static_css="\n".join(static_css), 83 show_download_link=show_download_link, 84 locustfile=environment.locustfile, 85 tasks=escape(dumps(task_data)), 86 ) 87 88 return res 89 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locust/html.py b/locust/html.py --- a/locust/html.py +++ b/locust/html.py @@ -20,11 +20,11 @@ stats = environment.runner.stats start_ts = stats.start_time - start_time = datetime.datetime.fromtimestamp(start_ts).strftime("%Y-%m-%d %H:%M:%S") + start_time = datetime.datetime.utcfromtimestamp(start_ts).strftime("%Y-%m-%d %H:%M:%S") end_ts = stats.last_request_timestamp if end_ts: - end_time = datetime.datetime.fromtimestamp(end_ts).strftime("%Y-%m-%d %H:%M:%S") + end_time = datetime.datetime.utcfromtimestamp(end_ts).strftime("%Y-%m-%d %H:%M:%S") else: end_time = start_time
{"golden_diff": "diff --git a/locust/html.py b/locust/html.py\n--- a/locust/html.py\n+++ b/locust/html.py\n@@ -20,11 +20,11 @@\n stats = environment.runner.stats\n \n start_ts = stats.start_time\n- start_time = datetime.datetime.fromtimestamp(start_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n+ start_time = datetime.datetime.utcfromtimestamp(start_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n \n end_ts = stats.last_request_timestamp\n if end_ts:\n- end_time = datetime.datetime.fromtimestamp(end_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n+ end_time = datetime.datetime.utcfromtimestamp(end_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n else:\n end_time = start_time\n", "issue": "the report cant show the right time\n<!-- \r\nIf you have a general question about how to use Locust, please check Stack Overflow first https://stackoverflow.com/questions/tagged/locust\r\n\r\nYou can also ask new questions on SO, https://stackoverflow.com/questions/ask just remember to tag your question with \"locust\". Do not immediately post your issue here after posting to SO, wait for an answer there instead.\r\n\r\nUse this form only for reporting actual bugs in Locust. Remember, the developers of Locust are unpaid volunteers, so make sure you have tried everything you can think of before filing a bug :) \r\n\r\nAlways make sure you are running an up to date Locust version (pip3 install -U locust)\r\n-->\r\n\r\n### Describe the bug\r\n<!-- A clear and concise description of what the bug is -->\r\n![image](https://user-images.githubusercontent.com/47803360/138236838-6d62d47c-aa4d-4de7-b714-d94b7dff639a.png)\r\n![image](https://user-images.githubusercontent.com/47803360/138236861-d318f852-da06-41e9-abcb-fa18d47166c7.png)\r\nthis picture is right time i am in china\r\n\r\n\r\n### Expected behavior\r\n<!-- Tell us what you think should happen -->\r\nWrong time zone reported\r\n### Actual behavior\r\n<!-- Tell us what happens instead. Include screenshots if this an issue with the GUI. 
-->\r\n\r\n### Steps to reproduce\r\n<!-- Please provide a minimal reproducible code example (https://stackoverflow.com/help/minimal-reproducible-example) --> \r\n\r\n### Environment\r\n\r\n- OS:\r\n- Python version:\r\n- Locust version: (please dont file issues for anything but the most recent release or prerelease builds)\r\n- Locust command line that you ran:\r\n- Locust file contents (anonymized if necessary):\n", "before_files": [{"content": "from jinja2 import Environment, FileSystemLoader\nimport os\nimport pathlib\nimport datetime\nfrom itertools import chain\nfrom .stats import sort_stats\nfrom .user.inspectuser import get_task_ratio_dict\nfrom html import escape\nfrom json import dumps\n\n\ndef render_template(file, **kwargs):\n templates_path = os.path.join(pathlib.Path(__file__).parent.absolute(), \"templates\")\n env = Environment(loader=FileSystemLoader(templates_path), extensions=[\"jinja2.ext.do\"])\n template = env.get_template(file)\n return template.render(**kwargs)\n\n\ndef get_html_report(environment, show_download_link=True):\n stats = environment.runner.stats\n\n start_ts = stats.start_time\n start_time = datetime.datetime.fromtimestamp(start_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n end_ts = stats.last_request_timestamp\n if end_ts:\n end_time = datetime.datetime.fromtimestamp(end_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n else:\n end_time = start_time\n\n host = None\n if environment.host:\n host = environment.host\n elif environment.runner.user_classes:\n all_hosts = set([l.host for l in environment.runner.user_classes])\n if len(all_hosts) == 1:\n host = list(all_hosts)[0]\n\n requests_statistics = list(chain(sort_stats(stats.entries), [stats.total]))\n failures_statistics = sort_stats(stats.errors)\n exceptions_statistics = [\n {**exc, \"nodes\": \", \".join(exc[\"nodes\"])} for exc in environment.runner.exceptions.values()\n ]\n\n history = stats.history\n\n static_js = []\n js_files = [\"jquery-1.11.3.min.js\", \"echarts.common.min.js\", \"vintage.js\", \"chart.js\", \"tasks.js\"]\n for js_file in js_files:\n path = os.path.join(os.path.dirname(__file__), \"static\", js_file)\n static_js.append(\"// \" + js_file)\n with open(path, encoding=\"utf8\") as f:\n static_js.append(f.read())\n static_js.extend([\"\", \"\"])\n\n static_css = []\n css_files = [\"tables.css\"]\n for css_file in css_files:\n path = os.path.join(os.path.dirname(__file__), \"static\", \"css\", css_file)\n static_css.append(\"/* \" + css_file + \" */\")\n with open(path, encoding=\"utf8\") as f:\n static_css.append(f.read())\n static_css.extend([\"\", \"\"])\n\n task_data = {\n \"per_class\": get_task_ratio_dict(environment.user_classes),\n \"total\": get_task_ratio_dict(environment.user_classes, total=True),\n }\n\n res = render_template(\n \"report.html\",\n int=int,\n round=round,\n requests_statistics=requests_statistics,\n failures_statistics=failures_statistics,\n exceptions_statistics=exceptions_statistics,\n start_time=start_time,\n end_time=end_time,\n host=host,\n history=history,\n static_js=\"\\n\".join(static_js),\n static_css=\"\\n\".join(static_css),\n show_download_link=show_download_link,\n locustfile=environment.locustfile,\n tasks=escape(dumps(task_data)),\n )\n\n return res\n", "path": "locust/html.py"}], "after_files": [{"content": "from jinja2 import Environment, FileSystemLoader\nimport os\nimport pathlib\nimport datetime\nfrom itertools import chain\nfrom .stats import sort_stats\nfrom .user.inspectuser import get_task_ratio_dict\nfrom html import escape\nfrom json import 
dumps\n\n\ndef render_template(file, **kwargs):\n templates_path = os.path.join(pathlib.Path(__file__).parent.absolute(), \"templates\")\n env = Environment(loader=FileSystemLoader(templates_path), extensions=[\"jinja2.ext.do\"])\n template = env.get_template(file)\n return template.render(**kwargs)\n\n\ndef get_html_report(environment, show_download_link=True):\n stats = environment.runner.stats\n\n start_ts = stats.start_time\n start_time = datetime.datetime.utcfromtimestamp(start_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n end_ts = stats.last_request_timestamp\n if end_ts:\n end_time = datetime.datetime.utcfromtimestamp(end_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n else:\n end_time = start_time\n\n host = None\n if environment.host:\n host = environment.host\n elif environment.runner.user_classes:\n all_hosts = set([l.host for l in environment.runner.user_classes])\n if len(all_hosts) == 1:\n host = list(all_hosts)[0]\n\n requests_statistics = list(chain(sort_stats(stats.entries), [stats.total]))\n failures_statistics = sort_stats(stats.errors)\n exceptions_statistics = [\n {**exc, \"nodes\": \", \".join(exc[\"nodes\"])} for exc in environment.runner.exceptions.values()\n ]\n\n history = stats.history\n\n static_js = []\n js_files = [\"jquery-1.11.3.min.js\", \"echarts.common.min.js\", \"vintage.js\", \"chart.js\", \"tasks.js\"]\n for js_file in js_files:\n path = os.path.join(os.path.dirname(__file__), \"static\", js_file)\n static_js.append(\"// \" + js_file)\n with open(path, encoding=\"utf8\") as f:\n static_js.append(f.read())\n static_js.extend([\"\", \"\"])\n\n static_css = []\n css_files = [\"tables.css\"]\n for css_file in css_files:\n path = os.path.join(os.path.dirname(__file__), \"static\", \"css\", css_file)\n static_css.append(\"/* \" + css_file + \" */\")\n with open(path, encoding=\"utf8\") as f:\n static_css.append(f.read())\n static_css.extend([\"\", \"\"])\n\n task_data = {\n \"per_class\": get_task_ratio_dict(environment.user_classes),\n \"total\": get_task_ratio_dict(environment.user_classes, total=True),\n }\n\n res = render_template(\n \"report.html\",\n int=int,\n round=round,\n requests_statistics=requests_statistics,\n failures_statistics=failures_statistics,\n exceptions_statistics=exceptions_statistics,\n start_time=start_time,\n end_time=end_time,\n host=host,\n history=history,\n static_js=\"\\n\".join(static_js),\n static_css=\"\\n\".join(static_css),\n show_download_link=show_download_link,\n locustfile=environment.locustfile,\n tasks=escape(dumps(task_data)),\n )\n\n return res\n", "path": "locust/html.py"}]}
1536
192
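The patch above fixes the reported timezone drift by rendering report timestamps with `datetime.datetime.utcfromtimestamp` instead of `datetime.datetime.fromtimestamp`. A minimal sketch of the difference (the epoch value is arbitrary; the two prints diverge whenever the host is not on UTC):

```python
import datetime

ts = 1_600_000_000  # arbitrary epoch seconds
local = datetime.datetime.fromtimestamp(ts)   # interpreted in the host's local zone
utc = datetime.datetime.utcfromtimestamp(ts)  # naive UTC datetime, as the patch uses

print(local.strftime("%Y-%m-%d %H:%M:%S"))
print(utc.strftime("%Y-%m-%d %H:%M:%S"))
```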
gh_patches_debug_5105
rasdani/github-patches
git_diff
encode__httpx-2442
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support httpcore>=0.16.0 Hi, I have a project that uses `fastapi` and `uvicorn` which demands `h11>=0.8`. Latest version of `fastapi` changed their test suite from `requests` to `httpx`. So I had to add `httpx` to my tests requirements. Now the problem is, that `httpx` requires `httpcore`, which since version `0.16.0` allows for `h11==0.14.0`, but the current version of `httpcore` required by `httpx` doesn't, so I end up having to downgrade the `h11==0.14.0` which I have to `0.12.0`. Can the constraint for `httpcore` be updated to `>=0.16.0` here? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 import sys 2 3 from setuptools import setup 4 5 sys.stderr.write( 6 """ 7 =============================== 8 Unsupported installation method 9 =============================== 10 httpx no longer supports installation with `python setup.py install`. 11 Please use `python -m pip install .` instead. 12 """ 13 ) 14 sys.exit(1) 15 16 17 # The below code will never execute, however GitHub is particularly 18 # picky about where it finds Python packaging metadata. 19 # See: https://github.com/github/feedback/discussions/6456 20 # 21 # To be removed once GitHub catches up. 22 23 setup( 24 name="httpx", 25 install_requires=[ 26 "certifi", 27 "sniffio", 28 "rfc3986[idna2008]>=1.3,<2", 29 "httpcore>=0.15.0,<0.16.0", 30 ], 31 ) 32 ``` Path: `httpx/__version__.py` Content: ``` 1 __title__ = "httpx" 2 __description__ = "A next generation HTTP client, for Python 3." 3 __version__ = "0.23.0" 4 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/httpx/__version__.py b/httpx/__version__.py --- a/httpx/__version__.py +++ b/httpx/__version__.py @@ -1,3 +1,3 @@ __title__ = "httpx" __description__ = "A next generation HTTP client, for Python 3." -__version__ = "0.23.0" +__version__ = "0.23.1" diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -26,6 +26,6 @@ "certifi", "sniffio", "rfc3986[idna2008]>=1.3,<2", - "httpcore>=0.15.0,<0.16.0", + "httpcore>=0.15.0,<0.17.0", ], )
{"golden_diff": "diff --git a/httpx/__version__.py b/httpx/__version__.py\n--- a/httpx/__version__.py\n+++ b/httpx/__version__.py\n@@ -1,3 +1,3 @@\n __title__ = \"httpx\"\n __description__ = \"A next generation HTTP client, for Python 3.\"\n-__version__ = \"0.23.0\"\n+__version__ = \"0.23.1\"\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,6 +26,6 @@\n \"certifi\",\n \"sniffio\",\n \"rfc3986[idna2008]>=1.3,<2\",\n- \"httpcore>=0.15.0,<0.16.0\",\n+ \"httpcore>=0.15.0,<0.17.0\",\n ],\n )\n", "issue": "Support httpcore>=0.16.0\nHi,\r\n\r\nI have a project that uses `fastapi` and `uvicorn` which demands `h11>=0.8`. Latest version of `fastapi` changed their test suite from `requests` to `httpx`. So I had to add `httpx` to my tests requirements.\r\n\r\nNow the problem is, that `httpx` requires `httpcore`, which since version `0.16.0` allows for `h11==0.14.0`, but the current version of `httpcore` required by `httpx` doesn't, so I end up having to downgrade the `h11==0.14.0` which I have to `0.12.0`.\r\n\r\nCan the constraint for `httpcore` be updated to `>=0.16.0` here?\n", "before_files": [{"content": "import sys\n\nfrom setuptools import setup\n\nsys.stderr.write(\n \"\"\"\n===============================\nUnsupported installation method\n===============================\nhttpx no longer supports installation with `python setup.py install`.\nPlease use `python -m pip install .` instead.\n\"\"\"\n)\nsys.exit(1)\n\n\n# The below code will never execute, however GitHub is particularly\n# picky about where it finds Python packaging metadata.\n# See: https://github.com/github/feedback/discussions/6456\n#\n# To be removed once GitHub catches up.\n\nsetup(\n name=\"httpx\",\n install_requires=[\n \"certifi\",\n \"sniffio\",\n \"rfc3986[idna2008]>=1.3,<2\",\n \"httpcore>=0.15.0,<0.16.0\",\n ],\n)\n", "path": "setup.py"}, {"content": "__title__ = \"httpx\"\n__description__ = \"A next generation HTTP client, for Python 3.\"\n__version__ = \"0.23.0\"\n", "path": "httpx/__version__.py"}], "after_files": [{"content": "import sys\n\nfrom setuptools import setup\n\nsys.stderr.write(\n \"\"\"\n===============================\nUnsupported installation method\n===============================\nhttpx no longer supports installation with `python setup.py install`.\nPlease use `python -m pip install .` instead.\n\"\"\"\n)\nsys.exit(1)\n\n\n# The below code will never execute, however GitHub is particularly\n# picky about where it finds Python packaging metadata.\n# See: https://github.com/github/feedback/discussions/6456\n#\n# To be removed once GitHub catches up.\n\nsetup(\n name=\"httpx\",\n install_requires=[\n \"certifi\",\n \"sniffio\",\n \"rfc3986[idna2008]>=1.3,<2\",\n \"httpcore>=0.15.0,<0.17.0\",\n ],\n)\n", "path": "setup.py"}, {"content": "__title__ = \"httpx\"\n__description__ = \"A next generation HTTP client, for Python 3.\"\n__version__ = \"0.23.1\"\n", "path": "httpx/__version__.py"}]}
731
203
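The fix above simply widens the `httpcore` version pin from `<0.16.0` to `<0.17.0` (and bumps `__version__`). A quick way to check which releases each pin admits, assuming the third-party `packaging` library is available:

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

old_pin = SpecifierSet(">=0.15.0,<0.16.0")
new_pin = SpecifierSet(">=0.15.0,<0.17.0")

for release in ("0.15.0", "0.16.0", "0.16.2"):
    v = Version(release)
    print(release, v in old_pin, v in new_pin)
# 0.16.x satisfies only the new pin, which is what unblocks h11==0.14.0.
```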
gh_patches_debug_210
rasdani/github-patches
git_diff
xonsh__xonsh-3049
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Exception on startup (pygments_cache) <!--- Provide a general summary of the issue in the Title above --> <!--- If you have a question along the lines of "How do I do this Bash command in xonsh" please first look over the Bash to Xonsh translation guide: http://xon.sh/bash_to_xsh.html If you don't find an answer there, please do open an issue! --> ## xonfig <!--- Please post the output of the `xonfig` command (run from inside xonsh) so we know more about your current setup --> ## Expected Behavior <!--- Tell us what should happen --> ## Current Behavior <!--- Tell us what happens instead of the expected behavior --> <!--- If part of your bug report is a traceback, please first enter debug mode before triggering the error To enter debug mode, set the environment variable `XONSH_DEBUG=1` _before_ starting `xonsh`. On Linux and OSX, an easy way to to do this is to run `env XONSH_DEBUG=1 xonsh` --> ## Steps to Reproduce <!--- Please try to write out a minimal reproducible snippet to trigger the bug, it will help us fix it! --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `xonsh/ptk2/__init__.py` Content: ``` 1 # must come before ptk / pygments imports 2 from xonsh.lazyasd import load_module_in_background 3 4 load_module_in_background( 5 "pkg_resources", 6 debug="XONSH_DEBUG", 7 replacements={"pygments.plugin": "pkg_resources"}, 8 ) 9 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/xonsh/ptk2/__init__.py b/xonsh/ptk2/__init__.py --- a/xonsh/ptk2/__init__.py +++ b/xonsh/ptk2/__init__.py @@ -1,8 +0,0 @@ -# must come before ptk / pygments imports -from xonsh.lazyasd import load_module_in_background - -load_module_in_background( - "pkg_resources", - debug="XONSH_DEBUG", - replacements={"pygments.plugin": "pkg_resources"}, -)
{"golden_diff": "diff --git a/xonsh/ptk2/__init__.py b/xonsh/ptk2/__init__.py\n--- a/xonsh/ptk2/__init__.py\n+++ b/xonsh/ptk2/__init__.py\n@@ -1,8 +0,0 @@\n-# must come before ptk / pygments imports\n-from xonsh.lazyasd import load_module_in_background\n-\n-load_module_in_background(\n- \"pkg_resources\",\n- debug=\"XONSH_DEBUG\",\n- replacements={\"pygments.plugin\": \"pkg_resources\"},\n-)\n", "issue": "Exception on startup (pygments_cache)\n<!--- Provide a general summary of the issue in the Title above -->\r\n<!--- If you have a question along the lines of \"How do I do this Bash command in xonsh\"\r\nplease first look over the Bash to Xonsh translation guide: http://xon.sh/bash_to_xsh.html\r\nIf you don't find an answer there, please do open an issue! -->\r\n\r\n## xonfig\r\n<!--- Please post the output of the `xonfig` command (run from inside xonsh) so we know more about your current setup -->\r\n\r\n## Expected Behavior\r\n<!--- Tell us what should happen -->\r\n\r\n## Current Behavior\r\n<!--- Tell us what happens instead of the expected behavior -->\r\n<!--- If part of your bug report is a traceback, please first enter debug mode before triggering the error\r\nTo enter debug mode, set the environment variable `XONSH_DEBUG=1` _before_ starting `xonsh`. \r\nOn Linux and OSX, an easy way to to do this is to run `env XONSH_DEBUG=1 xonsh` -->\r\n\r\n## Steps to Reproduce\r\n<!--- Please try to write out a minimal reproducible snippet to trigger the bug, it will help us fix it! -->\r\n\n", "before_files": [{"content": "# must come before ptk / pygments imports\nfrom xonsh.lazyasd import load_module_in_background\n\nload_module_in_background(\n \"pkg_resources\",\n debug=\"XONSH_DEBUG\",\n replacements={\"pygments.plugin\": \"pkg_resources\"},\n)\n", "path": "xonsh/ptk2/__init__.py"}], "after_files": [{"content": "", "path": "xonsh/ptk2/__init__.py"}]}
584
125
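The golden diff above resolves the startup exception by deleting the module-level `load_module_in_background(...)` call, i.e. by removing side effects at import time. A generic sketch of the lazy alternative such patches usually fall back on (`_get_pkg_resources` is an illustrative helper, not xonsh code):

```python
import importlib

_pkg_resources = None

def _get_pkg_resources():
    """Import pkg_resources only on first use instead of at module import."""
    global _pkg_resources
    if _pkg_resources is None:
        _pkg_resources = importlib.import_module("pkg_resources")
    return _pkg_resources
```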
gh_patches_debug_33044
rasdani/github-patches
git_diff
vispy__vispy-2135
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove warning on import about missing networkx dependency Currently, importing vispy (or some submodule) without networkx installed results in a warning about installing networkx, even if the user/downstream library has no intention of using the graph layout. The warning should be delayed and turned into an error, as noted by @djhoese [here](https://github.com/napari/napari/issues/2979#issuecomment-874159877). xref napari/napari#2979 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `vispy/visuals/graphs/layouts/networkx_layout.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # Copyright (c) Vispy Development Team. All Rights Reserved. 3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.#!/usr/bin/env python3 4 from ..util import _straight_line_vertices, issparse 5 import numpy as np 6 try: 7 import networkx as nx 8 except ModuleNotFoundError: 9 import warnings 10 warnings.warn( 11 "Networkx not found, please install network to use its layouts") 12 nx = None 13 14 15 class NetworkxCoordinates: 16 def __init__(self, graph=None, layout=None, **kwargs): 17 """ 18 Converts :graph: into a layout. Can be used in conjunction with networkx layouts or using raw 2D-numpy arrays. 19 20 Parameters 21 ---------- 22 graph : a networkx graph. 23 layout : str or dict or iterable-object of float32, optional 24 - When :layout: is s string, a lookup will be performed in the networkx avaiable layouts. 25 - When :layout: is a dict, it will be assumed that it takes the shape (key, value) = (node_id, 2D-coordinate). 26 - When :layout: is numpy array it is assumed it takes the shape (number_of_nodes, 2). 27 kwargs: dict, optional 28 when layout is :str: :kwargs: will act as a setting dictionary for the layout function of networkx 29 """ 30 if isinstance(graph, type(None)): 31 raise ValueError("Requires networkx input") 32 self.graph = graph 33 self.positions = np.zeros((len(graph), 2), dtype=np.float32) 34 # default random positions 35 if isinstance(layout, type(None)): 36 self.positions = np.random.rand(*self.positions.shape) 37 38 # check for networkx 39 elif isinstance(layout, str): 40 if nx: 41 if not layout.endswith("_layout"): 42 layout += "_layout" # append for nx 43 layout_function = getattr(nx, layout) 44 if layout_function: 45 self.positions = np.asarray( 46 [i for i in dict(layout_function(graph, **kwargs)).values()]) 47 else: 48 raise ValueError("Check networkx for layouts") 49 else: 50 raise ValueError("networkx not found") 51 # assume dict from networkx; values are 2-array 52 elif isinstance(layout, dict): 53 self.positions = np.asarray([i for i in layout.values()]) 54 55 # assume given values 56 elif isinstance(layout, np.ndarray): 57 assert layout.ndim == 2 58 assert layout.shape[0] == len(graph) 59 self.positions = layout 60 else: 61 raise ValueError("Input not understood") 62 63 # normalize coordinates 64 self.positions = (self.positions - self.positions.min()) / \ 65 (self.positions.max() - self.positions.min()) 66 self.positions = self.positions.astype(np.float32) 67 68 def __call__(self, adjacency_mat, directed=False): 69 """ 70 Parameters 71 ---------- 72 adjacency_mat : sparse adjacency matrix. 73 directed : bool, default False 74 75 Returns 76 --------- 77 (node_vertices, line_vertices, arrow_vertices) : tuple 78 Yields the node and line vertices in a tuple. 
This layout only yields a 79 single time, and has no builtin animation 80 """ 81 if issparse(adjacency_mat): 82 adjacency_mat = adjacency_mat.tocoo() 83 line_vertices, arrows = _straight_line_vertices( 84 adjacency_mat, self.positions, directed) 85 86 yield self.positions, line_vertices, arrows 87 88 @property 89 def adj(self): 90 """Convenient storage and holder of the adjacency matrix for the :scene.visuals.Graph: function.""" 91 return nx.adjacency_matrix(self.graph) 92 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/vispy/visuals/graphs/layouts/networkx_layout.py b/vispy/visuals/graphs/layouts/networkx_layout.py --- a/vispy/visuals/graphs/layouts/networkx_layout.py +++ b/vispy/visuals/graphs/layouts/networkx_layout.py @@ -6,9 +6,6 @@ try: import networkx as nx except ModuleNotFoundError: - import warnings - warnings.warn( - "Networkx not found, please install network to use its layouts") nx = None @@ -27,6 +24,8 @@ kwargs: dict, optional when layout is :str: :kwargs: will act as a setting dictionary for the layout function of networkx """ + if nx is None: + raise ValueError("networkx not found, please install networkx to use its layouts") if isinstance(graph, type(None)): raise ValueError("Requires networkx input") self.graph = graph @@ -37,17 +36,14 @@ # check for networkx elif isinstance(layout, str): - if nx: - if not layout.endswith("_layout"): - layout += "_layout" # append for nx - layout_function = getattr(nx, layout) - if layout_function: - self.positions = np.asarray( - [i for i in dict(layout_function(graph, **kwargs)).values()]) - else: - raise ValueError("Check networkx for layouts") + if not layout.endswith("_layout"): + layout += "_layout" # append for nx + layout_function = getattr(nx, layout) + if layout_function: + self.positions = np.asarray( + [i for i in dict(layout_function(graph, **kwargs)).values()]) else: - raise ValueError("networkx not found") + raise ValueError("Check networkx for layouts") # assume dict from networkx; values are 2-array elif isinstance(layout, dict): self.positions = np.asarray([i for i in layout.values()])
{"golden_diff": "diff --git a/vispy/visuals/graphs/layouts/networkx_layout.py b/vispy/visuals/graphs/layouts/networkx_layout.py\n--- a/vispy/visuals/graphs/layouts/networkx_layout.py\n+++ b/vispy/visuals/graphs/layouts/networkx_layout.py\n@@ -6,9 +6,6 @@\n try:\n import networkx as nx\n except ModuleNotFoundError:\n- import warnings\n- warnings.warn(\n- \"Networkx not found, please install network to use its layouts\")\n nx = None\n \n \n@@ -27,6 +24,8 @@\n kwargs: dict, optional\n when layout is :str: :kwargs: will act as a setting dictionary for the layout function of networkx\n \"\"\"\n+ if nx is None:\n+ raise ValueError(\"networkx not found, please install networkx to use its layouts\")\n if isinstance(graph, type(None)):\n raise ValueError(\"Requires networkx input\")\n self.graph = graph\n@@ -37,17 +36,14 @@\n \n # check for networkx\n elif isinstance(layout, str):\n- if nx:\n- if not layout.endswith(\"_layout\"):\n- layout += \"_layout\" # append for nx\n- layout_function = getattr(nx, layout)\n- if layout_function:\n- self.positions = np.asarray(\n- [i for i in dict(layout_function(graph, **kwargs)).values()])\n- else:\n- raise ValueError(\"Check networkx for layouts\")\n+ if not layout.endswith(\"_layout\"):\n+ layout += \"_layout\" # append for nx\n+ layout_function = getattr(nx, layout)\n+ if layout_function:\n+ self.positions = np.asarray(\n+ [i for i in dict(layout_function(graph, **kwargs)).values()])\n else:\n- raise ValueError(\"networkx not found\")\n+ raise ValueError(\"Check networkx for layouts\")\n # assume dict from networkx; values are 2-array\n elif isinstance(layout, dict):\n self.positions = np.asarray([i for i in layout.values()])\n", "issue": "Remove warning on import about missing networkx dependency\nCurrently, importing vispy (or some submodule) without networkx installed results in a warning about installing networkx, even if the user/downstream library has no intention of using the graph layout. The warning should be delayed and turned into an error, as noted by @djhoese [here](https://github.com/napari/napari/issues/2979#issuecomment-874159877).\r\n\r\nxref napari/napari#2979\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.#!/usr/bin/env python3\nfrom ..util import _straight_line_vertices, issparse\nimport numpy as np\ntry:\n import networkx as nx\nexcept ModuleNotFoundError:\n import warnings\n warnings.warn(\n \"Networkx not found, please install network to use its layouts\")\n nx = None\n\n\nclass NetworkxCoordinates:\n def __init__(self, graph=None, layout=None, **kwargs):\n \"\"\"\n Converts :graph: into a layout. 
Can be used in conjunction with networkx layouts or using raw 2D-numpy arrays.\n\n Parameters\n ----------\n graph : a networkx graph.\n layout : str or dict or iterable-object of float32, optional\n - When :layout: is s string, a lookup will be performed in the networkx avaiable layouts.\n - When :layout: is a dict, it will be assumed that it takes the shape (key, value) = (node_id, 2D-coordinate).\n - When :layout: is numpy array it is assumed it takes the shape (number_of_nodes, 2).\n kwargs: dict, optional\n when layout is :str: :kwargs: will act as a setting dictionary for the layout function of networkx\n \"\"\"\n if isinstance(graph, type(None)):\n raise ValueError(\"Requires networkx input\")\n self.graph = graph\n self.positions = np.zeros((len(graph), 2), dtype=np.float32)\n # default random positions\n if isinstance(layout, type(None)):\n self.positions = np.random.rand(*self.positions.shape)\n\n # check for networkx\n elif isinstance(layout, str):\n if nx:\n if not layout.endswith(\"_layout\"):\n layout += \"_layout\" # append for nx\n layout_function = getattr(nx, layout)\n if layout_function:\n self.positions = np.asarray(\n [i for i in dict(layout_function(graph, **kwargs)).values()])\n else:\n raise ValueError(\"Check networkx for layouts\")\n else:\n raise ValueError(\"networkx not found\")\n # assume dict from networkx; values are 2-array\n elif isinstance(layout, dict):\n self.positions = np.asarray([i for i in layout.values()])\n\n # assume given values\n elif isinstance(layout, np.ndarray):\n assert layout.ndim == 2\n assert layout.shape[0] == len(graph)\n self.positions = layout\n else:\n raise ValueError(\"Input not understood\")\n\n # normalize coordinates\n self.positions = (self.positions - self.positions.min()) / \\\n (self.positions.max() - self.positions.min())\n self.positions = self.positions.astype(np.float32)\n\n def __call__(self, adjacency_mat, directed=False):\n \"\"\"\n Parameters\n ----------\n adjacency_mat : sparse adjacency matrix.\n directed : bool, default False\n\n Returns\n ---------\n (node_vertices, line_vertices, arrow_vertices) : tuple\n Yields the node and line vertices in a tuple. This layout only yields a\n single time, and has no builtin animation\n \"\"\"\n if issparse(adjacency_mat):\n adjacency_mat = adjacency_mat.tocoo()\n line_vertices, arrows = _straight_line_vertices(\n adjacency_mat, self.positions, directed)\n\n yield self.positions, line_vertices, arrows\n\n @property\n def adj(self):\n \"\"\"Convenient storage and holder of the adjacency matrix for the :scene.visuals.Graph: function.\"\"\"\n return nx.adjacency_matrix(self.graph)\n", "path": "vispy/visuals/graphs/layouts/networkx_layout.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.#!/usr/bin/env python3\nfrom ..util import _straight_line_vertices, issparse\nimport numpy as np\ntry:\n import networkx as nx\nexcept ModuleNotFoundError:\n nx = None\n\n\nclass NetworkxCoordinates:\n def __init__(self, graph=None, layout=None, **kwargs):\n \"\"\"\n Converts :graph: into a layout. 
Can be used in conjunction with networkx layouts or using raw 2D-numpy arrays.\n\n Parameters\n ----------\n graph : a networkx graph.\n layout : str or dict or iterable-object of float32, optional\n - When :layout: is s string, a lookup will be performed in the networkx avaiable layouts.\n - When :layout: is a dict, it will be assumed that it takes the shape (key, value) = (node_id, 2D-coordinate).\n - When :layout: is numpy array it is assumed it takes the shape (number_of_nodes, 2).\n kwargs: dict, optional\n when layout is :str: :kwargs: will act as a setting dictionary for the layout function of networkx\n \"\"\"\n if nx is None:\n raise ValueError(\"networkx not found, please install networkx to use its layouts\")\n if isinstance(graph, type(None)):\n raise ValueError(\"Requires networkx input\")\n self.graph = graph\n self.positions = np.zeros((len(graph), 2), dtype=np.float32)\n # default random positions\n if isinstance(layout, type(None)):\n self.positions = np.random.rand(*self.positions.shape)\n\n # check for networkx\n elif isinstance(layout, str):\n if not layout.endswith(\"_layout\"):\n layout += \"_layout\" # append for nx\n layout_function = getattr(nx, layout)\n if layout_function:\n self.positions = np.asarray(\n [i for i in dict(layout_function(graph, **kwargs)).values()])\n else:\n raise ValueError(\"Check networkx for layouts\")\n # assume dict from networkx; values are 2-array\n elif isinstance(layout, dict):\n self.positions = np.asarray([i for i in layout.values()])\n\n # assume given values\n elif isinstance(layout, np.ndarray):\n assert layout.ndim == 2\n assert layout.shape[0] == len(graph)\n self.positions = layout\n else:\n raise ValueError(\"Input not understood\")\n\n # normalize coordinates\n self.positions = (self.positions - self.positions.min()) / \\\n (self.positions.max() - self.positions.min())\n self.positions = self.positions.astype(np.float32)\n\n def __call__(self, adjacency_mat, directed=False):\n \"\"\"\n Parameters\n ----------\n adjacency_mat : sparse adjacency matrix.\n directed : bool, default False\n\n Returns\n ---------\n (node_vertices, line_vertices, arrow_vertices) : tuple\n Yields the node and line vertices in a tuple. This layout only yields a\n single time, and has no builtin animation\n \"\"\"\n if issparse(adjacency_mat):\n adjacency_mat = adjacency_mat.tocoo()\n line_vertices, arrows = _straight_line_vertices(\n adjacency_mat, self.positions, directed)\n\n yield self.positions, line_vertices, arrows\n\n @property\n def adj(self):\n \"\"\"Convenient storage and holder of the adjacency matrix for the :scene.visuals.Graph: function.\"\"\"\n return nx.adjacency_matrix(self.graph)\n", "path": "vispy/visuals/graphs/layouts/networkx_layout.py"}]}
1333
447
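The patch above converts an unconditional import-time warning into an error raised only when the networkx-backed layout is actually constructed. The deferred-dependency pattern, in isolation:

```python
# Swallow the missing dependency at import time...
try:
    import networkx as nx
except ModuleNotFoundError:
    nx = None

class NetworkxLayout:
    def __init__(self):
        # ...and surface it only when the feature is actually used.
        if nx is None:
            raise ValueError(
                "networkx not found, please install networkx to use its layouts"
            )

try:
    NetworkxLayout()
except ValueError as err:
    print(err)  # reached only when networkx is absent
```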
gh_patches_debug_59198
rasdani/github-patches
git_diff
Nitrate__Nitrate-319
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix filter calls Similar with the calls to `map`, these calls must be fixed as well. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tcms/testplans/helpers/email.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 from django.conf import settings 3 4 from tcms.core.utils.mailto import send_email_using_threading 5 6 7 def email_plan_update(plan): 8 recipients = get_plan_notification_recipients(plan) 9 if len(recipients) == 0: 10 return 11 subject = u'TestPlan %s has been updated.' % plan.pk 12 send_email_using_threading(settings.PLAN_EMAIL_TEMPLATE, subject, 13 recipients, {'plan': plan}) 14 15 16 def email_plan_deletion(plan): 17 recipients = get_plan_notification_recipients(plan) 18 if len(recipients) == 0: 19 return 20 subject = u'TestPlan %s has been deleted.' % plan.pk 21 send_email_using_threading(settings.PLAN_DELELE_EMAIL_TEMPLATE, subject, 22 recipients, {'plan': plan}) 23 24 25 def get_plan_notification_recipients(plan): 26 recipients = set() 27 if plan.owner: 28 if plan.email_settings.auto_to_plan_owner: 29 recipients.add(plan.owner.email) 30 if plan.email_settings.auto_to_plan_author: 31 recipients.add(plan.author.email) 32 if plan.email_settings.auto_to_case_owner: 33 case_authors = plan.case.values_list('author__email', flat=True) 34 recipients.update(case_authors) 35 if plan.email_settings.auto_to_case_default_tester: 36 case_testers = plan.case.values_list('default_tester__email', 37 flat=True) 38 recipients.update(case_testers) 39 return filter(lambda e: bool(e), recipients) 40 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tcms/testplans/helpers/email.py b/tcms/testplans/helpers/email.py --- a/tcms/testplans/helpers/email.py +++ b/tcms/testplans/helpers/email.py @@ -36,4 +36,4 @@ case_testers = plan.case.values_list('default_tester__email', flat=True) recipients.update(case_testers) - return filter(lambda e: bool(e), recipients) + return [r for r in recipients if r]
{"golden_diff": "diff --git a/tcms/testplans/helpers/email.py b/tcms/testplans/helpers/email.py\n--- a/tcms/testplans/helpers/email.py\n+++ b/tcms/testplans/helpers/email.py\n@@ -36,4 +36,4 @@\n case_testers = plan.case.values_list('default_tester__email',\n flat=True)\n recipients.update(case_testers)\n- return filter(lambda e: bool(e), recipients)\n+ return [r for r in recipients if r]\n", "issue": "Fix filter calls\nSimilar with the calls to `map`, these calls must be fixed as well.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom django.conf import settings\n\nfrom tcms.core.utils.mailto import send_email_using_threading\n\n\ndef email_plan_update(plan):\n recipients = get_plan_notification_recipients(plan)\n if len(recipients) == 0:\n return\n subject = u'TestPlan %s has been updated.' % plan.pk\n send_email_using_threading(settings.PLAN_EMAIL_TEMPLATE, subject,\n recipients, {'plan': plan})\n\n\ndef email_plan_deletion(plan):\n recipients = get_plan_notification_recipients(plan)\n if len(recipients) == 0:\n return\n subject = u'TestPlan %s has been deleted.' % plan.pk\n send_email_using_threading(settings.PLAN_DELELE_EMAIL_TEMPLATE, subject,\n recipients, {'plan': plan})\n\n\ndef get_plan_notification_recipients(plan):\n recipients = set()\n if plan.owner:\n if plan.email_settings.auto_to_plan_owner:\n recipients.add(plan.owner.email)\n if plan.email_settings.auto_to_plan_author:\n recipients.add(plan.author.email)\n if plan.email_settings.auto_to_case_owner:\n case_authors = plan.case.values_list('author__email', flat=True)\n recipients.update(case_authors)\n if plan.email_settings.auto_to_case_default_tester:\n case_testers = plan.case.values_list('default_tester__email',\n flat=True)\n recipients.update(case_testers)\n return filter(lambda e: bool(e), recipients)\n", "path": "tcms/testplans/helpers/email.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom django.conf import settings\n\nfrom tcms.core.utils.mailto import send_email_using_threading\n\n\ndef email_plan_update(plan):\n recipients = get_plan_notification_recipients(plan)\n if len(recipients) == 0:\n return\n subject = u'TestPlan %s has been updated.' % plan.pk\n send_email_using_threading(settings.PLAN_EMAIL_TEMPLATE, subject,\n recipients, {'plan': plan})\n\n\ndef email_plan_deletion(plan):\n recipients = get_plan_notification_recipients(plan)\n if len(recipients) == 0:\n return\n subject = u'TestPlan %s has been deleted.' % plan.pk\n send_email_using_threading(settings.PLAN_DELELE_EMAIL_TEMPLATE, subject,\n recipients, {'plan': plan})\n\n\ndef get_plan_notification_recipients(plan):\n recipients = set()\n if plan.owner:\n if plan.email_settings.auto_to_plan_owner:\n recipients.add(plan.owner.email)\n if plan.email_settings.auto_to_plan_author:\n recipients.add(plan.author.email)\n if plan.email_settings.auto_to_case_owner:\n case_authors = plan.case.values_list('author__email', flat=True)\n recipients.update(case_authors)\n if plan.email_settings.auto_to_case_default_tester:\n case_testers = plan.case.values_list('default_tester__email',\n flat=True)\n recipients.update(case_testers)\n return [r for r in recipients if r]\n", "path": "tcms/testplans/helpers/email.py"}]}
663
103
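The bug above is a Python 3 porting issue: `filter()` returns a lazy iterator, so callers that need `len(recipients)` fail. The patch materializes a list instead. In isolation:

```python
recipients = {"tester@example.com", "", None}

lazy = filter(lambda e: bool(e), recipients)  # Python 3: an iterator; len(lazy) raises TypeError
eager = [r for r in recipients if r]          # the patched form; len() works

print(len(eager))  # -> 1
```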
gh_patches_debug_2599
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-3323
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `colossalai/fx/tracer/_tracer_utils.py` Content: ``` 1 from typing import List, Union, Any 2 from ..proxy import ColoProxy, ColoAttribute 3 import torch 4 from .meta_patch import meta_patched_function, meta_patched_module 5 6 __all__ = ['is_element_in_list', 'extract_meta'] 7 8 9 def is_element_in_list(elements: Union[List[Any], Any], list_: List[Any]): 10 if isinstance(elements, (tuple, list, set)): 11 for ele in elements: 12 if ele not in list_: 13 return False, ele 14 else: 15 if elements not in list_: 16 return False, elements 17 18 return True, None 19 20 21 def extract_meta(*args, **kwargs): 22 23 def _convert(val): 24 if isinstance(val, ColoProxy): 25 return val.meta_data 26 elif isinstance(val, (list, tuple)): 27 return type(val)([_convert(ele) for ele in val]) 28 29 return val 30 31 new_args = [_convert(val) for val in args] 32 new_kwargs = {k: _convert(v) for k, v in kwargs.items()} 33 return new_args, new_kwargs 34 35 36 def compute_meta_data_for_functions_proxy(target, args, kwargs): 37 args_metas, kwargs_metas = extract_meta(*args, **kwargs) 38 39 # fetch patched function 40 if meta_patched_function.has(target): 41 meta_target = meta_patched_function.get(target) 42 elif meta_patched_function.has(target.__name__): 43 meta_target = meta_patched_function.get(target.__name__) 44 else: 45 meta_target = target 46 meta_out = meta_target(*args_metas, **kwargs_metas) 47 if isinstance(meta_out, torch.Tensor): 48 meta_out = meta_out.to(device="meta") 49 50 return meta_out 51 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/colossalai/fx/tracer/_tracer_utils.py b/colossalai/fx/tracer/_tracer_utils.py --- a/colossalai/fx/tracer/_tracer_utils.py +++ b/colossalai/fx/tracer/_tracer_utils.py @@ -1,6 +1,8 @@ -from typing import List, Union, Any -from ..proxy import ColoProxy, ColoAttribute +from typing import Any, List, Union + import torch + +from ..proxy import ColoAttribute, ColoProxy from .meta_patch import meta_patched_function, meta_patched_module __all__ = ['is_element_in_list', 'extract_meta']
{"golden_diff": "diff --git a/colossalai/fx/tracer/_tracer_utils.py b/colossalai/fx/tracer/_tracer_utils.py\n--- a/colossalai/fx/tracer/_tracer_utils.py\n+++ b/colossalai/fx/tracer/_tracer_utils.py\n@@ -1,6 +1,8 @@\n-from typing import List, Union, Any\n-from ..proxy import ColoProxy, ColoAttribute\n+from typing import Any, List, Union\n+\n import torch\n+\n+from ..proxy import ColoAttribute, ColoProxy\n from .meta_patch import meta_patched_function, meta_patched_module\n \n __all__ = ['is_element_in_list', 'extract_meta']\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from typing import List, Union, Any\nfrom ..proxy import ColoProxy, ColoAttribute\nimport torch\nfrom .meta_patch import meta_patched_function, meta_patched_module\n\n__all__ = ['is_element_in_list', 'extract_meta']\n\n\ndef is_element_in_list(elements: Union[List[Any], Any], list_: List[Any]):\n if isinstance(elements, (tuple, list, set)):\n for ele in elements:\n if ele not in list_:\n return False, ele\n else:\n if elements not in list_:\n return False, elements\n\n return True, None\n\n\ndef extract_meta(*args, **kwargs):\n\n def _convert(val):\n if isinstance(val, ColoProxy):\n return val.meta_data\n elif isinstance(val, (list, tuple)):\n return type(val)([_convert(ele) for ele in val])\n\n return val\n\n new_args = [_convert(val) for val in args]\n new_kwargs = {k: _convert(v) for k, v in kwargs.items()}\n return new_args, new_kwargs\n\n\ndef compute_meta_data_for_functions_proxy(target, args, kwargs):\n args_metas, kwargs_metas = extract_meta(*args, **kwargs)\n\n # fetch patched function\n if meta_patched_function.has(target):\n meta_target = meta_patched_function.get(target)\n elif meta_patched_function.has(target.__name__):\n meta_target = meta_patched_function.get(target.__name__)\n else:\n meta_target = target\n meta_out = meta_target(*args_metas, **kwargs_metas)\n if isinstance(meta_out, torch.Tensor):\n meta_out = meta_out.to(device=\"meta\")\n\n return meta_out\n", "path": "colossalai/fx/tracer/_tracer_utils.py"}], "after_files": [{"content": "from typing import Any, List, Union\n\nimport torch\n\nfrom ..proxy import ColoAttribute, ColoProxy\nfrom .meta_patch import meta_patched_function, meta_patched_module\n\n__all__ = ['is_element_in_list', 'extract_meta']\n\n\ndef is_element_in_list(elements: Union[List[Any], Any], list_: List[Any]):\n if isinstance(elements, (tuple, list, set)):\n for ele in elements:\n if ele not in list_:\n return False, ele\n else:\n if elements not in list_:\n return False, elements\n\n return True, None\n\n\ndef extract_meta(*args, **kwargs):\n\n def _convert(val):\n if isinstance(val, ColoProxy):\n return val.meta_data\n elif isinstance(val, (list, tuple)):\n return type(val)([_convert(ele) for ele in val])\n\n return val\n\n new_args = [_convert(val) for val in args]\n new_kwargs = {k: _convert(v) for k, v in kwargs.items()}\n return new_args, new_kwargs\n\n\ndef compute_meta_data_for_functions_proxy(target, args, kwargs):\n args_metas, kwargs_metas = extract_meta(*args, **kwargs)\n\n # fetch patched function\n if meta_patched_function.has(target):\n meta_target = meta_patched_function.get(target)\n elif meta_patched_function.has(target.__name__):\n meta_target = meta_patched_function.get(target.__name__)\n else:\n meta_target = target\n meta_out = meta_target(*args_metas, **kwargs_metas)\n if isinstance(meta_out, torch.Tensor):\n meta_out = meta_out.to(device=\"meta\")\n\n return 
meta_out\n", "path": "colossalai/fx/tracer/_tracer_utils.py"}]}
747
144
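The golden diff above only regroups imports into the conventional stdlib / third-party / local sections. Assuming `isort` is installed, the same ordering can be reproduced mechanically:

```python
import isort

messy = (
    "from typing import List, Union, Any\n"
    "from ..proxy import ColoProxy, ColoAttribute\n"
    "import torch\n"
)
print(isort.code(messy))  # emits the grouped, alphabetized form seen in the patch
```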
gh_patches_debug_5791
rasdani/github-patches
git_diff
paperless-ngx__paperless-ngx-195
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Other] Update name to paperless-ngx in docs https://github.com/paperless-ngx/paperless-ngx/issues/4#issuecomment-1039890021 > I would go through the places where the project name is mentioned and add the x to it. Only actually code (variable names) could remain unchanged for backwards compatibility. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/paperless/urls.py` Content: ``` 1 from django.conf.urls import include 2 from django.contrib import admin 3 from django.contrib.auth.decorators import login_required 4 from django.urls import path, re_path 5 from django.views.decorators.csrf import csrf_exempt 6 from django.views.generic import RedirectView 7 from rest_framework.authtoken import views 8 from rest_framework.routers import DefaultRouter 9 10 from django.utils.translation import gettext_lazy as _ 11 12 from django.conf import settings 13 14 from paperless.consumers import StatusConsumer 15 from documents.views import ( 16 CorrespondentViewSet, 17 UnifiedSearchViewSet, 18 LogViewSet, 19 TagViewSet, 20 DocumentTypeViewSet, 21 IndexView, 22 SearchAutoCompleteView, 23 StatisticsView, 24 PostDocumentView, 25 SavedViewViewSet, 26 BulkEditView, 27 SelectionDataView, 28 BulkDownloadView, 29 ) 30 from paperless.views import FaviconView 31 32 api_router = DefaultRouter() 33 api_router.register(r"correspondents", CorrespondentViewSet) 34 api_router.register(r"document_types", DocumentTypeViewSet) 35 api_router.register(r"documents", UnifiedSearchViewSet) 36 api_router.register(r"logs", LogViewSet, basename="logs") 37 api_router.register(r"tags", TagViewSet) 38 api_router.register(r"saved_views", SavedViewViewSet) 39 40 41 urlpatterns = [ 42 re_path( 43 r"^api/", 44 include( 45 [ 46 re_path( 47 r"^auth/", 48 include( 49 ("rest_framework.urls", "rest_framework"), 50 namespace="rest_framework", 51 ), 52 ), 53 re_path( 54 r"^search/autocomplete/", 55 SearchAutoCompleteView.as_view(), 56 name="autocomplete", 57 ), 58 re_path(r"^statistics/", StatisticsView.as_view(), name="statistics"), 59 re_path( 60 r"^documents/post_document/", 61 PostDocumentView.as_view(), 62 name="post_document", 63 ), 64 re_path( 65 r"^documents/bulk_edit/", BulkEditView.as_view(), name="bulk_edit" 66 ), 67 re_path( 68 r"^documents/selection_data/", 69 SelectionDataView.as_view(), 70 name="selection_data", 71 ), 72 re_path( 73 r"^documents/bulk_download/", 74 BulkDownloadView.as_view(), 75 name="bulk_download", 76 ), 77 path("token/", views.obtain_auth_token), 78 ] 79 + api_router.urls 80 ), 81 ), 82 re_path(r"^favicon.ico$", FaviconView.as_view(), name="favicon"), 83 re_path(r"admin/", admin.site.urls), 84 re_path( 85 r"^fetch/", 86 include( 87 [ 88 re_path( 89 r"^doc/(?P<pk>\d+)$", 90 RedirectView.as_view( 91 url=settings.BASE_URL + "api/documents/%(pk)s/download/" 92 ), 93 ), 94 re_path( 95 r"^thumb/(?P<pk>\d+)$", 96 RedirectView.as_view( 97 url=settings.BASE_URL + "api/documents/%(pk)s/thumb/" 98 ), 99 ), 100 re_path( 101 r"^preview/(?P<pk>\d+)$", 102 RedirectView.as_view( 103 url=settings.BASE_URL + "api/documents/%(pk)s/preview/" 104 ), 105 ), 106 ] 107 ), 108 ), 109 re_path( 110 r"^push$", 111 csrf_exempt( 112 RedirectView.as_view(url=settings.BASE_URL + "api/documents/post_document/") 113 ), 114 ), 115 # Frontend assets TODO: this is pretty bad, but it works. 
116 path( 117 "assets/<path:path>", 118 RedirectView.as_view( 119 url=settings.STATIC_URL + "frontend/en-US/assets/%(path)s" 120 ), 121 ), 122 # TODO: with localization, this is even worse! :/ 123 # login, logout 124 path("accounts/", include("django.contrib.auth.urls")), 125 # Root of the Frontent 126 re_path(r".*", login_required(IndexView.as_view()), name="base"), 127 ] 128 129 130 websocket_urlpatterns = [ 131 re_path(r"ws/status/$", StatusConsumer.as_asgi()), 132 ] 133 134 # Text in each page's <h1> (and above login form). 135 admin.site.site_header = "Paperless-ng" 136 # Text at the end of each page's <title>. 137 admin.site.site_title = "Paperless-ng" 138 # Text at the top of the admin index page. 139 admin.site.index_title = _("Paperless-ng administration") 140 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/paperless/urls.py b/src/paperless/urls.py --- a/src/paperless/urls.py +++ b/src/paperless/urls.py @@ -132,8 +132,8 @@ ] # Text in each page's <h1> (and above login form). -admin.site.site_header = "Paperless-ng" +admin.site.site_header = "Paperless-ngx" # Text at the end of each page's <title>. -admin.site.site_title = "Paperless-ng" +admin.site.site_title = "Paperless-ngx" # Text at the top of the admin index page. -admin.site.index_title = _("Paperless-ng administration") +admin.site.index_title = _("Paperless-ngx administration")
{"golden_diff": "diff --git a/src/paperless/urls.py b/src/paperless/urls.py\n--- a/src/paperless/urls.py\n+++ b/src/paperless/urls.py\n@@ -132,8 +132,8 @@\n ]\n \n # Text in each page's <h1> (and above login form).\n-admin.site.site_header = \"Paperless-ng\"\n+admin.site.site_header = \"Paperless-ngx\"\n # Text at the end of each page's <title>.\n-admin.site.site_title = \"Paperless-ng\"\n+admin.site.site_title = \"Paperless-ngx\"\n # Text at the top of the admin index page.\n-admin.site.index_title = _(\"Paperless-ng administration\")\n+admin.site.index_title = _(\"Paperless-ngx administration\")\n", "issue": "[Other] Update name to paperless-ngx in docs\nhttps://github.com/paperless-ngx/paperless-ngx/issues/4#issuecomment-1039890021\r\n\r\n> I would go through the places where the project name is mentioned and add the x to it. Only actually code (variable names) could remain unchanged for backwards compatibility.\n", "before_files": [{"content": "from django.conf.urls import include\nfrom django.contrib import admin\nfrom django.contrib.auth.decorators import login_required\nfrom django.urls import path, re_path\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import RedirectView\nfrom rest_framework.authtoken import views\nfrom rest_framework.routers import DefaultRouter\n\nfrom django.utils.translation import gettext_lazy as _\n\nfrom django.conf import settings\n\nfrom paperless.consumers import StatusConsumer\nfrom documents.views import (\n CorrespondentViewSet,\n UnifiedSearchViewSet,\n LogViewSet,\n TagViewSet,\n DocumentTypeViewSet,\n IndexView,\n SearchAutoCompleteView,\n StatisticsView,\n PostDocumentView,\n SavedViewViewSet,\n BulkEditView,\n SelectionDataView,\n BulkDownloadView,\n)\nfrom paperless.views import FaviconView\n\napi_router = DefaultRouter()\napi_router.register(r\"correspondents\", CorrespondentViewSet)\napi_router.register(r\"document_types\", DocumentTypeViewSet)\napi_router.register(r\"documents\", UnifiedSearchViewSet)\napi_router.register(r\"logs\", LogViewSet, basename=\"logs\")\napi_router.register(r\"tags\", TagViewSet)\napi_router.register(r\"saved_views\", SavedViewViewSet)\n\n\nurlpatterns = [\n re_path(\n r\"^api/\",\n include(\n [\n re_path(\n r\"^auth/\",\n include(\n (\"rest_framework.urls\", \"rest_framework\"),\n namespace=\"rest_framework\",\n ),\n ),\n re_path(\n r\"^search/autocomplete/\",\n SearchAutoCompleteView.as_view(),\n name=\"autocomplete\",\n ),\n re_path(r\"^statistics/\", StatisticsView.as_view(), name=\"statistics\"),\n re_path(\n r\"^documents/post_document/\",\n PostDocumentView.as_view(),\n name=\"post_document\",\n ),\n re_path(\n r\"^documents/bulk_edit/\", BulkEditView.as_view(), name=\"bulk_edit\"\n ),\n re_path(\n r\"^documents/selection_data/\",\n SelectionDataView.as_view(),\n name=\"selection_data\",\n ),\n re_path(\n r\"^documents/bulk_download/\",\n BulkDownloadView.as_view(),\n name=\"bulk_download\",\n ),\n path(\"token/\", views.obtain_auth_token),\n ]\n + api_router.urls\n ),\n ),\n re_path(r\"^favicon.ico$\", FaviconView.as_view(), name=\"favicon\"),\n re_path(r\"admin/\", admin.site.urls),\n re_path(\n r\"^fetch/\",\n include(\n [\n re_path(\n r\"^doc/(?P<pk>\\d+)$\",\n RedirectView.as_view(\n url=settings.BASE_URL + \"api/documents/%(pk)s/download/\"\n ),\n ),\n re_path(\n r\"^thumb/(?P<pk>\\d+)$\",\n RedirectView.as_view(\n url=settings.BASE_URL + \"api/documents/%(pk)s/thumb/\"\n ),\n ),\n re_path(\n r\"^preview/(?P<pk>\\d+)$\",\n RedirectView.as_view(\n url=settings.BASE_URL + 
\"api/documents/%(pk)s/preview/\"\n ),\n ),\n ]\n ),\n ),\n re_path(\n r\"^push$\",\n csrf_exempt(\n RedirectView.as_view(url=settings.BASE_URL + \"api/documents/post_document/\")\n ),\n ),\n # Frontend assets TODO: this is pretty bad, but it works.\n path(\n \"assets/<path:path>\",\n RedirectView.as_view(\n url=settings.STATIC_URL + \"frontend/en-US/assets/%(path)s\"\n ),\n ),\n # TODO: with localization, this is even worse! :/\n # login, logout\n path(\"accounts/\", include(\"django.contrib.auth.urls\")),\n # Root of the Frontent\n re_path(r\".*\", login_required(IndexView.as_view()), name=\"base\"),\n]\n\n\nwebsocket_urlpatterns = [\n re_path(r\"ws/status/$\", StatusConsumer.as_asgi()),\n]\n\n# Text in each page's <h1> (and above login form).\nadmin.site.site_header = \"Paperless-ng\"\n# Text at the end of each page's <title>.\nadmin.site.site_title = \"Paperless-ng\"\n# Text at the top of the admin index page.\nadmin.site.index_title = _(\"Paperless-ng administration\")\n", "path": "src/paperless/urls.py"}], "after_files": [{"content": "from django.conf.urls import include\nfrom django.contrib import admin\nfrom django.contrib.auth.decorators import login_required\nfrom django.urls import path, re_path\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import RedirectView\nfrom rest_framework.authtoken import views\nfrom rest_framework.routers import DefaultRouter\n\nfrom django.utils.translation import gettext_lazy as _\n\nfrom django.conf import settings\n\nfrom paperless.consumers import StatusConsumer\nfrom documents.views import (\n CorrespondentViewSet,\n UnifiedSearchViewSet,\n LogViewSet,\n TagViewSet,\n DocumentTypeViewSet,\n IndexView,\n SearchAutoCompleteView,\n StatisticsView,\n PostDocumentView,\n SavedViewViewSet,\n BulkEditView,\n SelectionDataView,\n BulkDownloadView,\n)\nfrom paperless.views import FaviconView\n\napi_router = DefaultRouter()\napi_router.register(r\"correspondents\", CorrespondentViewSet)\napi_router.register(r\"document_types\", DocumentTypeViewSet)\napi_router.register(r\"documents\", UnifiedSearchViewSet)\napi_router.register(r\"logs\", LogViewSet, basename=\"logs\")\napi_router.register(r\"tags\", TagViewSet)\napi_router.register(r\"saved_views\", SavedViewViewSet)\n\n\nurlpatterns = [\n re_path(\n r\"^api/\",\n include(\n [\n re_path(\n r\"^auth/\",\n include(\n (\"rest_framework.urls\", \"rest_framework\"),\n namespace=\"rest_framework\",\n ),\n ),\n re_path(\n r\"^search/autocomplete/\",\n SearchAutoCompleteView.as_view(),\n name=\"autocomplete\",\n ),\n re_path(r\"^statistics/\", StatisticsView.as_view(), name=\"statistics\"),\n re_path(\n r\"^documents/post_document/\",\n PostDocumentView.as_view(),\n name=\"post_document\",\n ),\n re_path(\n r\"^documents/bulk_edit/\", BulkEditView.as_view(), name=\"bulk_edit\"\n ),\n re_path(\n r\"^documents/selection_data/\",\n SelectionDataView.as_view(),\n name=\"selection_data\",\n ),\n re_path(\n r\"^documents/bulk_download/\",\n BulkDownloadView.as_view(),\n name=\"bulk_download\",\n ),\n path(\"token/\", views.obtain_auth_token),\n ]\n + api_router.urls\n ),\n ),\n re_path(r\"^favicon.ico$\", FaviconView.as_view(), name=\"favicon\"),\n re_path(r\"admin/\", admin.site.urls),\n re_path(\n r\"^fetch/\",\n include(\n [\n re_path(\n r\"^doc/(?P<pk>\\d+)$\",\n RedirectView.as_view(\n url=settings.BASE_URL + \"api/documents/%(pk)s/download/\"\n ),\n ),\n re_path(\n r\"^thumb/(?P<pk>\\d+)$\",\n RedirectView.as_view(\n url=settings.BASE_URL + \"api/documents/%(pk)s/thumb/\"\n ),\n 
),\n re_path(\n r\"^preview/(?P<pk>\\d+)$\",\n RedirectView.as_view(\n url=settings.BASE_URL + \"api/documents/%(pk)s/preview/\"\n ),\n ),\n ]\n ),\n ),\n re_path(\n r\"^push$\",\n csrf_exempt(\n RedirectView.as_view(url=settings.BASE_URL + \"api/documents/post_document/\")\n ),\n ),\n # Frontend assets TODO: this is pretty bad, but it works.\n path(\n \"assets/<path:path>\",\n RedirectView.as_view(\n url=settings.STATIC_URL + \"frontend/en-US/assets/%(path)s\"\n ),\n ),\n # TODO: with localization, this is even worse! :/\n # login, logout\n path(\"accounts/\", include(\"django.contrib.auth.urls\")),\n # Root of the Frontent\n re_path(r\".*\", login_required(IndexView.as_view()), name=\"base\"),\n]\n\n\nwebsocket_urlpatterns = [\n re_path(r\"ws/status/$\", StatusConsumer.as_asgi()),\n]\n\n# Text in each page's <h1> (and above login form).\nadmin.site.site_header = \"Paperless-ngx\"\n# Text at the end of each page's <title>.\nadmin.site.site_title = \"Paperless-ngx\"\n# Text at the top of the admin index page.\nadmin.site.index_title = _(\"Paperless-ngx administration\")\n", "path": "src/paperless/urls.py"}]}
1537
163
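The golden diff in the paperless-ngx record above is deliberately narrow: it renames only the three `admin.site` branding strings at the bottom of `urls.py`, leaving variable names untouched for backwards compatibility, exactly as the linked issue comment suggests. A lightweight way to spot leftovers after such a rename is a regex scan for the old name; the sketch below is a minimal illustration, and the helper name, the sample string, and the idea that a negative lookahead for `x` is a sufficient test are all assumptions rather than anything from the repository.

```python
import re

# Flag occurrences of the old project name that were not renamed, i.e.
# "Paperless-ng" not already followed by "x". Purely illustrative.
def stale_branding(source: str) -> list[str]:
    return re.findall(r"Paperless-ng(?!x)", source)

sample = (
    'admin.site.site_header = "Paperless-ngx"\n'
    'admin.site.site_title = "Paperless-ng"\n'
)
print(stale_branding(sample))  # ['Paperless-ng'] -> one stale string remains
```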
gh_patches_debug_34903
rasdani/github-patches
git_diff
data-for-change__anyway-731
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Move load_discussions into main I can't figure out what's the purpose of this file --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `anyway/load_discussions.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import argparse 3 from .models import DiscussionMarker 4 import re 5 from .database import db_session 6 import sys 7 import logging 8 9 def main(): 10 parser = argparse.ArgumentParser() 11 parser.add_argument('identifiers', type=str, nargs='*', 12 help='Disqus identifiers to create markers for') 13 args = parser.parse_args() 14 15 identifiers = args.identifiers if args.identifiers else sys.stdin 16 17 for identifier in identifiers: 18 m = re.match(r'\((\d+\.\d+),\s*(\d+\.\d+)\)', identifier) 19 if not m: 20 logging.error("Failed processing: " + identifier) 21 continue 22 (latitude, longitude) = m.group(1, 2) 23 marker = DiscussionMarker.parse({ 24 'latitude': latitude, 25 'longitude': longitude, 26 'title': identifier, 27 'identifier': identifier 28 }) 29 try: 30 db_session.add(marker) 31 db_session.commit() 32 logging.info("Added: " + identifier, end="") 33 except: 34 db_session.rollback() 35 logging.warn("Failed: " + identifier, end="") 36 37 38 if __name__ == "__main__": 39 main() 40 ``` Path: `main.py` Content: ``` 1 #!/usr/bin/env python 2 3 import os 4 import click 5 import logging 6 7 @click.group() 8 def cli(): 9 pass 10 11 12 @cli.command() 13 @click.option('--open', 'open_server', is_flag=True, 14 help='Open the server for communication from outside', default=False) 15 def testserver(open_server): 16 from anyway import app, united 17 from apscheduler.scheduler import Scheduler 18 19 sched = Scheduler() 20 21 @sched.interval_schedule(hours=12) 22 def scheduled_import(): 23 united.main() 24 sched.start() 25 26 logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s') 27 28 default_host = '0.0.0.0' if open_server else '127.0.0.1' 29 app.run(debug=True, host=os.getenv('IP', default_host), 30 port=int(os.getenv('PORT', 5000))) 31 32 33 @cli.command() 34 def init_db(): 35 from anyway.models import init_db 36 init_db() 37 38 39 @cli.command() 40 @click.option('--specific_folder', is_flag=True, default=False) 41 @click.option('--delete_all', is_flag=True) 42 @click.option('--path', type=str, default="static/data/lms") 43 @click.option('--batch_size', type=int, default=100) 44 @click.option('--provider_code', type=int) 45 def process_data(specific_folder, delete_all, path, batch_size, provider_code): 46 from anyway.process import main 47 48 return main(specific_folder=specific_folder, delete_all=delete_all, path=path, 49 batch_size=batch_size, provider_code=provider_code) 50 51 52 @cli.command() 53 @click.option('--light', is_flag=True, help='Import without downloading any new files') 54 @click.option('--username', default='') 55 @click.option('--password', default='') 56 @click.option('--lastmail', is_flag=True) 57 def import_united_data(light, username, password, lastmail): 58 from anyway.united import main 59 60 return main(light=light, username=username, password=password, lastmail=lastmail) 61 62 if __name__ == '__main__': 63 cli() 64 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/anyway/load_discussions.py b/anyway/load_discussions.py deleted file mode 100755 --- a/anyway/load_discussions.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- coding: utf-8 -*- -import argparse -from .models import DiscussionMarker -import re -from .database import db_session -import sys -import logging - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('identifiers', type=str, nargs='*', - help='Disqus identifiers to create markers for') - args = parser.parse_args() - - identifiers = args.identifiers if args.identifiers else sys.stdin - - for identifier in identifiers: - m = re.match(r'\((\d+\.\d+),\s*(\d+\.\d+)\)', identifier) - if not m: - logging.error("Failed processing: " + identifier) - continue - (latitude, longitude) = m.group(1, 2) - marker = DiscussionMarker.parse({ - 'latitude': latitude, - 'longitude': longitude, - 'title': identifier, - 'identifier': identifier - }) - try: - db_session.add(marker) - db_session.commit() - logging.info("Added: " + identifier, end="") - except: - db_session.rollback() - logging.warn("Failed: " + identifier, end="") - - -if __name__ == "__main__": - main() diff --git a/main.py b/main.py --- a/main.py +++ b/main.py @@ -1,8 +1,10 @@ #!/usr/bin/env python - -import os import click import logging +import os +import re +import sys + @click.group() def cli(): @@ -59,5 +61,36 @@ return main(light=light, username=username, password=password, lastmail=lastmail) + +@cli.command() +@click.argument('identifiers', nargs=-1) +def load_discussions(identifiers): + from anyway.database import db_session + from anyway.models import DiscussionMarker + + identifiers = identifiers or sys.stdin + + for identifier in identifiers: + identifier = identifier.strip() + m = re.match(r'\((\d+\.\d+),\s*(\d+\.\d+)\)', identifier) + if not m: + logging.error("Failed processing: " + identifier) + continue + (latitude, longitude) = m.group(1, 2) + marker = DiscussionMarker.parse({ + 'latitude': latitude, + 'longitude': longitude, + 'title': identifier, + 'identifier': identifier + }) + try: + db_session.add(marker) + db_session.commit() + logging.info("Added: " + identifier) + except Exception as e: + db_session.rollback() + logging.warn("Failed: " + identifier + ": " + e.message) + + if __name__ == '__main__': cli()
{"golden_diff": "diff --git a/anyway/load_discussions.py b/anyway/load_discussions.py\ndeleted file mode 100755\n--- a/anyway/load_discussions.py\n+++ /dev/null\n@@ -1,39 +0,0 @@\n-# -*- coding: utf-8 -*-\n-import argparse\n-from .models import DiscussionMarker\n-import re\n-from .database import db_session\n-import sys\n-import logging\n-\n-def main():\n- parser = argparse.ArgumentParser()\n- parser.add_argument('identifiers', type=str, nargs='*',\n- help='Disqus identifiers to create markers for')\n- args = parser.parse_args()\n-\n- identifiers = args.identifiers if args.identifiers else sys.stdin\n-\n- for identifier in identifiers:\n- m = re.match(r'\\((\\d+\\.\\d+),\\s*(\\d+\\.\\d+)\\)', identifier)\n- if not m:\n- logging.error(\"Failed processing: \" + identifier)\n- continue\n- (latitude, longitude) = m.group(1, 2)\n- marker = DiscussionMarker.parse({\n- 'latitude': latitude,\n- 'longitude': longitude,\n- 'title': identifier,\n- 'identifier': identifier\n- })\n- try:\n- db_session.add(marker)\n- db_session.commit()\n- logging.info(\"Added: \" + identifier, end=\"\")\n- except:\n- db_session.rollback()\n- logging.warn(\"Failed: \" + identifier, end=\"\")\n-\n-\n-if __name__ == \"__main__\":\n- main()\ndiff --git a/main.py b/main.py\n--- a/main.py\n+++ b/main.py\n@@ -1,8 +1,10 @@\n #!/usr/bin/env python\n-\n-import os\n import click\n import logging\n+import os\n+import re\n+import sys\n+\n \n @click.group()\n def cli():\n@@ -59,5 +61,36 @@\n \n return main(light=light, username=username, password=password, lastmail=lastmail)\n \n+\n+@cli.command()\n+@click.argument('identifiers', nargs=-1)\n+def load_discussions(identifiers):\n+ from anyway.database import db_session\n+ from anyway.models import DiscussionMarker\n+\n+ identifiers = identifiers or sys.stdin\n+\n+ for identifier in identifiers:\n+ identifier = identifier.strip()\n+ m = re.match(r'\\((\\d+\\.\\d+),\\s*(\\d+\\.\\d+)\\)', identifier)\n+ if not m:\n+ logging.error(\"Failed processing: \" + identifier)\n+ continue\n+ (latitude, longitude) = m.group(1, 2)\n+ marker = DiscussionMarker.parse({\n+ 'latitude': latitude,\n+ 'longitude': longitude,\n+ 'title': identifier,\n+ 'identifier': identifier\n+ })\n+ try:\n+ db_session.add(marker)\n+ db_session.commit()\n+ logging.info(\"Added: \" + identifier)\n+ except Exception as e:\n+ db_session.rollback()\n+ logging.warn(\"Failed: \" + identifier + \": \" + e.message)\n+\n+\n if __name__ == '__main__':\n cli()\n", "issue": "Move load_discussions into main\nI can't figure out what's the purpose of this file\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport argparse\nfrom .models import DiscussionMarker\nimport re\nfrom .database import db_session\nimport sys\nimport logging\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('identifiers', type=str, nargs='*',\n help='Disqus identifiers to create markers for')\n args = parser.parse_args()\n\n identifiers = args.identifiers if args.identifiers else sys.stdin\n\n for identifier in identifiers:\n m = re.match(r'\\((\\d+\\.\\d+),\\s*(\\d+\\.\\d+)\\)', identifier)\n if not m:\n logging.error(\"Failed processing: \" + identifier)\n continue\n (latitude, longitude) = m.group(1, 2)\n marker = DiscussionMarker.parse({\n 'latitude': latitude,\n 'longitude': longitude,\n 'title': identifier,\n 'identifier': identifier\n })\n try:\n db_session.add(marker)\n db_session.commit()\n logging.info(\"Added: \" + identifier, end=\"\")\n except:\n db_session.rollback()\n logging.warn(\"Failed: \" + identifier, 
end=\"\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "anyway/load_discussions.py"}, {"content": "#!/usr/bin/env python\n\nimport os\nimport click\nimport logging\n\n@click.group()\ndef cli():\n pass\n\n\n@cli.command()\n@click.option('--open', 'open_server', is_flag=True,\n help='Open the server for communication from outside', default=False)\ndef testserver(open_server):\n from anyway import app, united\n from apscheduler.scheduler import Scheduler\n\n sched = Scheduler()\n\n @sched.interval_schedule(hours=12)\n def scheduled_import():\n united.main()\n sched.start()\n\n logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')\n\n default_host = '0.0.0.0' if open_server else '127.0.0.1'\n app.run(debug=True, host=os.getenv('IP', default_host),\n port=int(os.getenv('PORT', 5000)))\n\n\n@cli.command()\ndef init_db():\n from anyway.models import init_db\n init_db()\n\n\n@cli.command()\n@click.option('--specific_folder', is_flag=True, default=False)\n@click.option('--delete_all', is_flag=True)\n@click.option('--path', type=str, default=\"static/data/lms\")\n@click.option('--batch_size', type=int, default=100)\n@click.option('--provider_code', type=int)\ndef process_data(specific_folder, delete_all, path, batch_size, provider_code):\n from anyway.process import main\n\n return main(specific_folder=specific_folder, delete_all=delete_all, path=path,\n batch_size=batch_size, provider_code=provider_code)\n\n\n@cli.command()\n@click.option('--light', is_flag=True, help='Import without downloading any new files')\n@click.option('--username', default='')\n@click.option('--password', default='')\n@click.option('--lastmail', is_flag=True)\ndef import_united_data(light, username, password, lastmail):\n from anyway.united import main\n\n return main(light=light, username=username, password=password, lastmail=lastmail)\n\nif __name__ == '__main__':\n cli()\n", "path": "main.py"}], "after_files": [{"content": null, "path": "anyway/load_discussions.py"}, {"content": "#!/usr/bin/env python\nimport click\nimport logging\nimport os\nimport re\nimport sys\n\n\n@click.group()\ndef cli():\n pass\n\n\n@cli.command()\n@click.option('--open', 'open_server', is_flag=True,\n help='Open the server for communication from outside', default=False)\ndef testserver(open_server):\n from anyway import app, united\n from apscheduler.scheduler import Scheduler\n\n sched = Scheduler()\n\n @sched.interval_schedule(hours=12)\n def scheduled_import():\n united.main()\n sched.start()\n\n logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')\n\n default_host = '0.0.0.0' if open_server else '127.0.0.1'\n app.run(debug=True, host=os.getenv('IP', default_host),\n port=int(os.getenv('PORT', 5000)))\n\n\n@cli.command()\ndef init_db():\n from anyway.models import init_db\n init_db()\n\n\n@cli.command()\n@click.option('--specific_folder', is_flag=True, default=False)\n@click.option('--delete_all', is_flag=True)\n@click.option('--path', type=str, default=\"static/data/lms\")\n@click.option('--batch_size', type=int, default=100)\n@click.option('--provider_code', type=int)\ndef process_data(specific_folder, delete_all, path, batch_size, provider_code):\n from anyway.process import main\n\n return main(specific_folder=specific_folder, delete_all=delete_all, path=path,\n batch_size=batch_size, provider_code=provider_code)\n\n\n@cli.command()\n@click.option('--light', is_flag=True, help='Import without downloading any new files')\n@click.option('--username', 
default='')\n@click.option('--password', default='')\n@click.option('--lastmail', is_flag=True)\ndef import_united_data(light, username, password, lastmail):\n from anyway.united import main\n\n return main(light=light, username=username, password=password, lastmail=lastmail)\n\n\n@cli.command()\n@click.argument('identifiers', nargs=-1)\ndef load_discussions(identifiers):\n from anyway.database import db_session\n from anyway.models import DiscussionMarker\n\n identifiers = identifiers or sys.stdin\n\n for identifier in identifiers:\n identifier = identifier.strip()\n m = re.match(r'\\((\\d+\\.\\d+),\\s*(\\d+\\.\\d+)\\)', identifier)\n if not m:\n logging.error(\"Failed processing: \" + identifier)\n continue\n (latitude, longitude) = m.group(1, 2)\n marker = DiscussionMarker.parse({\n 'latitude': latitude,\n 'longitude': longitude,\n 'title': identifier,\n 'identifier': identifier\n })\n try:\n db_session.add(marker)\n db_session.commit()\n logging.info(\"Added: \" + identifier)\n except Exception as e:\n db_session.rollback()\n logging.warn(\"Failed: \" + identifier + \": \" + e.message)\n\n\nif __name__ == '__main__':\n cli()\n", "path": "main.py"}]}
1183
685
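Two details in the anyway record above are easy to miss. First, the original `anyway/load_discussions.py` passed `end=""` to `logging.info` and `logging.warn`, a keyword that belongs to `print` and that `logging` rejects, so the old script's logging calls would raise `TypeError`; the relocated command drops the keyword. Second, the rewritten `except` branch reads `e.message`, which plain Python 3 exceptions do not define, so the failure path would itself likely raise `AttributeError`, a latent bug the golden diff introduces. The load-bearing piece that survives intact is the coordinate regex, exercised below on invented identifiers with the database layer stubbed out as prints.

```python
import re

# The parsing loop from load_discussions, minus DiscussionMarker and
# db_session; the identifiers are made up for illustration.
PATTERN = re.compile(r"\((\d+\.\d+),\s*(\d+\.\d+)\)")

for identifier in ["(32.0853, 34.7818)", "(31.77,35.21)", "not-a-coordinate"]:
    m = PATTERN.match(identifier)
    if not m:
        print("Failed processing:", identifier)
        continue
    latitude, longitude = m.group(1, 2)
    print("would add marker at", latitude, longitude)
```

After the merge the command would presumably be invoked as `python main.py load_discussions "(32.0853, 34.7818)"`, or fed identifiers on stdin, matching the `identifiers or sys.stdin` fallback in the patched `main.py`.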
gh_patches_debug_367
rasdani/github-patches
git_diff
rasterio__rasterio-1305
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add `GetMetadataItem` like method Ref: https://github.com/mapbox/rasterio/issues/1077, I'm proposing to add a new method in https://github.com/mapbox/rasterio/blob/master/rasterio/_base.pyx to replicate GDAL GetMetadataItem **Method Name:** `get_metadata_item` or `get_metadata` **Why:** I need to be able to get TIFF metadata like `band.GetMetadataItem('IFD_OFFSET', 'TIFF')` **Code:** ```cython def get_metadata(self, bidx, ns, dm=None, ovr=None): """Returns metadata item Parameters ---------- bidx: int Band index, starting with 1. name: str The key for the metadata item to fetch. domain: str The domain to fetch for. ovr: int Overview level Returns ------- str """ cdef GDALMajorObjectH b = NULL cdef GDALMajorObjectH obj = NULL cdef char *value = NULL cdef const char *name = NULL cdef const char *domain = NULL ns = ns.encode('utf-8') name = ns if dm: dm = dm.encode('utf-8') domain = dm b = self.band(bidx) if ovr: b = GDALGetOverview(b, ovr) obj = b value = GDALGetMetadataItem(obj, name, domain) if value == NULL: return None else: return value ``` @sgillies I'm happy to submit a PR for that :-) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rasterio/errors.py` Content: ``` 1 """Errors and Warnings.""" 2 3 from click import FileError 4 5 6 class RasterioError(Exception): 7 """Root exception class""" 8 9 10 class WindowError(RasterioError): 11 """Raised when errors occur during window operations""" 12 13 14 class CRSError(ValueError): 15 """Raised when a CRS string or mapping is invalid or cannot serve 16 to define a coordinate transformation.""" 17 18 19 class EnvError(RasterioError): 20 """Raised when the state of GDAL/AWS environment cannot be created 21 or modified.""" 22 23 24 class DriverRegistrationError(ValueError): 25 """Raised when a format driver is requested but is not registered.""" 26 27 28 class FileOverwriteError(FileError): 29 """Raised when Rasterio's CLI refuses to clobber output files.""" 30 31 def __init__(self, message): 32 """Raise FileOverwriteError with message as hint.""" 33 super(FileOverwriteError, self).__init__('', hint=message) 34 35 36 class RasterioIOError(IOError): 37 """Raised when a dataset cannot be opened using one of the 38 registered format drivers.""" 39 40 41 class NodataShadowWarning(UserWarning): 42 """Warn that a dataset's nodata attribute is shadowing its alpha band.""" 43 44 def __str__(self): 45 return ("The dataset's nodata attribute is shadowing " 46 "the alpha band. All masks will be determined " 47 "by the nodata attribute") 48 49 50 class NotGeoreferencedWarning(UserWarning): 51 """Warn that a dataset isn't georeferenced.""" 52 53 54 class GDALBehaviorChangeException(RuntimeError): 55 """Raised when GDAL's behavior differs from the given arguments. For 56 example, antimeridian cutting is always on as of GDAL 2.2.0. Users 57 expecting it to be off will be presented with a MultiPolygon when the 58 rest of their code expects a Polygon. 59 60 # Raises an exception on GDAL >= 2.2.0 61 rasterio.warp.transform_geometry( 62 src_crs, dst_crs, antimeridian_cutting=False) 63 """ 64 65 66 class GDALOptionNotImplementedError(RasterioError): 67 """A dataset opening or dataset creation option can't be supported 68 69 This will be raised from Rasterio's shim modules. 
For example, when 70 a user passes arguments to open_dataset() that can't be evaluated 71 by GDAL 1.x. 72 """ 73 74 class GDALVersionError(RasterioError): 75 """Raised if the runtime version of GDAL does not meet the required 76 version of GDAL.""" 77 78 79 class WindowEvaluationError(ValueError): 80 """Raised when window evaluation fails""" 81 82 83 class RasterioDeprecationWarning(UserWarning): 84 """Rasterio module deprecations""" 85 86 87 class RasterBlockError(RasterioError): 88 """Raised when raster block access fails""" 89 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rasterio/errors.py b/rasterio/errors.py --- a/rasterio/errors.py +++ b/rasterio/errors.py @@ -86,3 +86,7 @@ class RasterBlockError(RasterioError): """Raised when raster block access fails""" + + +class BandOverviewError(UserWarning): + """Raised when a band overview access fails."""
{"golden_diff": "diff --git a/rasterio/errors.py b/rasterio/errors.py\n--- a/rasterio/errors.py\n+++ b/rasterio/errors.py\n@@ -86,3 +86,7 @@\n \n class RasterBlockError(RasterioError):\n \"\"\"Raised when raster block access fails\"\"\"\n+\n+\n+class BandOverviewError(UserWarning):\n+ \"\"\"Raised when a band overview access fails.\"\"\"\n", "issue": "Add `GetMetadataItem` like method \nRef: https://github.com/mapbox/rasterio/issues/1077, \r\n\r\nI'm proposing to add a new method in https://github.com/mapbox/rasterio/blob/master/rasterio/_base.pyx to replicate GDAL GetMetadataItem\r\n\r\n\r\n**Method Name:** `get_metadata_item` or `get_metadata`\r\n**Why:** I need to be able to get TIFF metadata like `band.GetMetadataItem('IFD_OFFSET', 'TIFF')`\r\n\r\n**Code:**\r\n\r\n```cython\r\n def get_metadata(self, bidx, ns, dm=None, ovr=None):\r\n \"\"\"Returns metadata item\r\n\r\n Parameters\r\n ----------\r\n bidx: int\r\n Band index, starting with 1.\r\n name: str\r\n The key for the metadata item to fetch.\r\n domain: str\r\n The domain to fetch for.\r\n ovr: int\r\n Overview level\r\n\r\n Returns\r\n -------\r\n str\r\n \"\"\"\r\n cdef GDALMajorObjectH b = NULL\r\n cdef GDALMajorObjectH obj = NULL\r\n cdef char *value = NULL\r\n cdef const char *name = NULL\r\n cdef const char *domain = NULL\r\n\r\n ns = ns.encode('utf-8')\r\n name = ns\r\n\r\n if dm:\r\n dm = dm.encode('utf-8')\r\n domain = dm\r\n\r\n b = self.band(bidx)\r\n if ovr:\r\n b = GDALGetOverview(b, ovr)\r\n\r\n obj = b\r\n\r\n value = GDALGetMetadataItem(obj, name, domain)\r\n if value == NULL:\r\n return None\r\n else:\r\n return value\r\n```\r\n\r\n@sgillies I'm happy to submit a PR for that :-) \n", "before_files": [{"content": "\"\"\"Errors and Warnings.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioError(Exception):\n \"\"\"Root exception class\"\"\"\n\n\nclass WindowError(RasterioError):\n \"\"\"Raised when errors occur during window operations\"\"\"\n\n\nclass CRSError(ValueError):\n \"\"\"Raised when a CRS string or mapping is invalid or cannot serve\n to define a coordinate transformation.\"\"\"\n\n\nclass EnvError(RasterioError):\n \"\"\"Raised when the state of GDAL/AWS environment cannot be created\n or modified.\"\"\"\n\n\nclass DriverRegistrationError(ValueError):\n \"\"\"Raised when a format driver is requested but is not registered.\"\"\"\n\n\nclass FileOverwriteError(FileError):\n \"\"\"Raised when Rasterio's CLI refuses to clobber output files.\"\"\"\n\n def __init__(self, message):\n \"\"\"Raise FileOverwriteError with message as hint.\"\"\"\n super(FileOverwriteError, self).__init__('', hint=message)\n\n\nclass RasterioIOError(IOError):\n \"\"\"Raised when a dataset cannot be opened using one of the\n registered format drivers.\"\"\"\n\n\nclass NodataShadowWarning(UserWarning):\n \"\"\"Warn that a dataset's nodata attribute is shadowing its alpha band.\"\"\"\n\n def __str__(self):\n return (\"The dataset's nodata attribute is shadowing \"\n \"the alpha band. All masks will be determined \"\n \"by the nodata attribute\")\n\n\nclass NotGeoreferencedWarning(UserWarning):\n \"\"\"Warn that a dataset isn't georeferenced.\"\"\"\n\n\nclass GDALBehaviorChangeException(RuntimeError):\n \"\"\"Raised when GDAL's behavior differs from the given arguments. For\n example, antimeridian cutting is always on as of GDAL 2.2.0. 
Users\n expecting it to be off will be presented with a MultiPolygon when the\n rest of their code expects a Polygon.\n\n # Raises an exception on GDAL >= 2.2.0\n rasterio.warp.transform_geometry(\n src_crs, dst_crs, antimeridian_cutting=False)\n \"\"\"\n\n\nclass GDALOptionNotImplementedError(RasterioError):\n \"\"\"A dataset opening or dataset creation option can't be supported\n\n This will be raised from Rasterio's shim modules. For example, when\n a user passes arguments to open_dataset() that can't be evaluated\n by GDAL 1.x.\n \"\"\"\n\nclass GDALVersionError(RasterioError):\n \"\"\"Raised if the runtime version of GDAL does not meet the required\n version of GDAL.\"\"\"\n\n\nclass WindowEvaluationError(ValueError):\n \"\"\"Raised when window evaluation fails\"\"\"\n\n\nclass RasterioDeprecationWarning(UserWarning):\n \"\"\"Rasterio module deprecations\"\"\"\n\n\nclass RasterBlockError(RasterioError):\n \"\"\"Raised when raster block access fails\"\"\"\n", "path": "rasterio/errors.py"}], "after_files": [{"content": "\"\"\"Errors and Warnings.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioError(Exception):\n \"\"\"Root exception class\"\"\"\n\n\nclass WindowError(RasterioError):\n \"\"\"Raised when errors occur during window operations\"\"\"\n\n\nclass CRSError(ValueError):\n \"\"\"Raised when a CRS string or mapping is invalid or cannot serve\n to define a coordinate transformation.\"\"\"\n\n\nclass EnvError(RasterioError):\n \"\"\"Raised when the state of GDAL/AWS environment cannot be created\n or modified.\"\"\"\n\n\nclass DriverRegistrationError(ValueError):\n \"\"\"Raised when a format driver is requested but is not registered.\"\"\"\n\n\nclass FileOverwriteError(FileError):\n \"\"\"Raised when Rasterio's CLI refuses to clobber output files.\"\"\"\n\n def __init__(self, message):\n \"\"\"Raise FileOverwriteError with message as hint.\"\"\"\n super(FileOverwriteError, self).__init__('', hint=message)\n\n\nclass RasterioIOError(IOError):\n \"\"\"Raised when a dataset cannot be opened using one of the\n registered format drivers.\"\"\"\n\n\nclass NodataShadowWarning(UserWarning):\n \"\"\"Warn that a dataset's nodata attribute is shadowing its alpha band.\"\"\"\n\n def __str__(self):\n return (\"The dataset's nodata attribute is shadowing \"\n \"the alpha band. All masks will be determined \"\n \"by the nodata attribute\")\n\n\nclass NotGeoreferencedWarning(UserWarning):\n \"\"\"Warn that a dataset isn't georeferenced.\"\"\"\n\n\nclass GDALBehaviorChangeException(RuntimeError):\n \"\"\"Raised when GDAL's behavior differs from the given arguments. For\n example, antimeridian cutting is always on as of GDAL 2.2.0. Users\n expecting it to be off will be presented with a MultiPolygon when the\n rest of their code expects a Polygon.\n\n # Raises an exception on GDAL >= 2.2.0\n rasterio.warp.transform_geometry(\n src_crs, dst_crs, antimeridian_cutting=False)\n \"\"\"\n\n\nclass GDALOptionNotImplementedError(RasterioError):\n \"\"\"A dataset opening or dataset creation option can't be supported\n\n This will be raised from Rasterio's shim modules. 
For example, when\n a user passes arguments to open_dataset() that can't be evaluated\n by GDAL 1.x.\n \"\"\"\n\nclass GDALVersionError(RasterioError):\n \"\"\"Raised if the runtime version of GDAL does not meet the required\n version of GDAL.\"\"\"\n\n\nclass WindowEvaluationError(ValueError):\n \"\"\"Raised when window evaluation fails\"\"\"\n\n\nclass RasterioDeprecationWarning(UserWarning):\n \"\"\"Rasterio module deprecations\"\"\"\n\n\nclass RasterBlockError(RasterioError):\n \"\"\"Raised when raster block access fails\"\"\"\n\n\nclass BandOverviewError(UserWarning):\n \"\"\"Raised when a band overview access fails.\"\"\"\n", "path": "rasterio/errors.py"}]}
1399
85
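The rasterio record above pairs a rich feature request (a `GetMetadataItem` shim, with a full Cython sketch in the issue text) with a golden diff that only touches `rasterio/errors.py`, adding `BandOverviewError`. Note the mismatch the patch bakes in: the class is named and documented like an exception ("Raised when...") yet subclasses `UserWarning`, which lets callers surface a missing overview via `warnings.warn` instead of raising, and lets strict setups promote it with `warnings.simplefilter("error")`. The sketch below imitates that dual use; the overview lookup is invented for illustration and is not rasterio's real API.

```python
import warnings
from typing import Optional

# Mirror of the class added by the patch; everything else is a stand-in.
class BandOverviewError(UserWarning):
    """Raised when a band overview access fails."""

def overview_or_warn(overviews: list[int], level: int) -> Optional[int]:
    if not 0 <= level < len(overviews):
        warnings.warn(f"no overview at level {level}", BandOverviewError)
        return None
    return overviews[level]

print(overview_or_warn([2, 4, 8], 1))  # 4
print(overview_or_warn([2, 4, 8], 5))  # emits a BandOverviewError warning, then None
```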
gh_patches_debug_33879
rasdani/github-patches
git_diff
TheAlgorithms__Python-9068
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Delete base85 algorithm ### Describe your change: Re #6216 Normally, I'm not in favour of just deleting algorithms, but I would make the argument that this is not an algorithm, rather just a snippet of code that utilises another library. Per `CONTRIBTUING.md` > Algorithms in this repo should not be how-to examples for existing Python packages. Instead, they should perform internal calculations or manipulations to convert input values into different output values This `base85` algorithm has essentially got two lines of code that purely utilise a singular library. The doctests only test an external library This repository should not contains examples on how to use a certain library, that would be the library documentation here https://docs.python.org/3/library/base64.html * [ ] Add an algorithm? * [ ] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? * [x] Delete an algorithm ### Checklist: * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms include at least one URL that points to Wikipedia or another similar explanation. * [x] If this pull request resolves one or more open issues then the description above includes the issue number(s) with a [closing keyword](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue): "Fixes #ISSUE-NUMBER". --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ciphers/base32.py` Content: ``` 1 import base64 2 3 4 def base32_encode(string: str) -> bytes: 5 """ 6 Encodes a given string to base32, returning a bytes-like object 7 >>> base32_encode("Hello World!") 8 b'JBSWY3DPEBLW64TMMQQQ====' 9 >>> base32_encode("123456") 10 b'GEZDGNBVGY======' 11 >>> base32_encode("some long complex string") 12 b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=' 13 """ 14 15 # encoded the input (we need a bytes like object) 16 # then, b32encoded the bytes-like object 17 return base64.b32encode(string.encode("utf-8")) 18 19 20 def base32_decode(encoded_bytes: bytes) -> str: 21 """ 22 Decodes a given bytes-like object to a string, returning a string 23 >>> base32_decode(b'JBSWY3DPEBLW64TMMQQQ====') 24 'Hello World!' 25 >>> base32_decode(b'GEZDGNBVGY======') 26 '123456' 27 >>> base32_decode(b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=') 28 'some long complex string' 29 """ 30 31 # decode the bytes from base32 32 # then, decode the bytes-like object to return as a string 33 return base64.b32decode(encoded_bytes).decode("utf-8") 34 35 36 if __name__ == "__main__": 37 test = "Hello World!" 
38 encoded = base32_encode(test) 39 print(encoded) 40 41 decoded = base32_decode(encoded) 42 print(decoded) 43 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ciphers/base32.py b/ciphers/base32.py --- a/ciphers/base32.py +++ b/ciphers/base32.py @@ -1,42 +1,45 @@ -import base64 +""" +Base32 encoding and decoding +https://en.wikipedia.org/wiki/Base32 +""" +B32_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567" -def base32_encode(string: str) -> bytes: + +def base32_encode(data: bytes) -> bytes: """ - Encodes a given string to base32, returning a bytes-like object - >>> base32_encode("Hello World!") + >>> base32_encode(b"Hello World!") b'JBSWY3DPEBLW64TMMQQQ====' - >>> base32_encode("123456") + >>> base32_encode(b"123456") b'GEZDGNBVGY======' - >>> base32_encode("some long complex string") + >>> base32_encode(b"some long complex string") b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=' """ - - # encoded the input (we need a bytes like object) - # then, b32encoded the bytes-like object - return base64.b32encode(string.encode("utf-8")) + binary_data = "".join(bin(ord(d))[2:].zfill(8) for d in data.decode("utf-8")) + binary_data = binary_data.ljust(5 * ((len(binary_data) // 5) + 1), "0") + b32_chunks = map("".join, zip(*[iter(binary_data)] * 5)) + b32_result = "".join(B32_CHARSET[int(chunk, 2)] for chunk in b32_chunks) + return bytes(b32_result.ljust(8 * ((len(b32_result) // 8) + 1), "="), "utf-8") -def base32_decode(encoded_bytes: bytes) -> str: +def base32_decode(data: bytes) -> bytes: """ - Decodes a given bytes-like object to a string, returning a string >>> base32_decode(b'JBSWY3DPEBLW64TMMQQQ====') - 'Hello World!' + b'Hello World!' >>> base32_decode(b'GEZDGNBVGY======') - '123456' + b'123456' >>> base32_decode(b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=') - 'some long complex string' + b'some long complex string' """ - - # decode the bytes from base32 - # then, decode the bytes-like object to return as a string - return base64.b32decode(encoded_bytes).decode("utf-8") + binary_chunks = "".join( + bin(B32_CHARSET.index(_d))[2:].zfill(5) + for _d in data.decode("utf-8").strip("=") + ) + binary_data = list(map("".join, zip(*[iter(binary_chunks)] * 8))) + return bytes("".join([chr(int(_d, 2)) for _d in binary_data]), "utf-8") if __name__ == "__main__": - test = "Hello World!" - encoded = base32_encode(test) - print(encoded) + import doctest - decoded = base32_decode(encoded) - print(decoded) + doctest.testmod()
{"golden_diff": "diff --git a/ciphers/base32.py b/ciphers/base32.py\n--- a/ciphers/base32.py\n+++ b/ciphers/base32.py\n@@ -1,42 +1,45 @@\n-import base64\n+\"\"\"\n+Base32 encoding and decoding\n \n+https://en.wikipedia.org/wiki/Base32\n+\"\"\"\n+B32_CHARSET = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567\"\n \n-def base32_encode(string: str) -> bytes:\n+\n+def base32_encode(data: bytes) -> bytes:\n \"\"\"\n- Encodes a given string to base32, returning a bytes-like object\n- >>> base32_encode(\"Hello World!\")\n+ >>> base32_encode(b\"Hello World!\")\n b'JBSWY3DPEBLW64TMMQQQ===='\n- >>> base32_encode(\"123456\")\n+ >>> base32_encode(b\"123456\")\n b'GEZDGNBVGY======'\n- >>> base32_encode(\"some long complex string\")\n+ >>> base32_encode(b\"some long complex string\")\n b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY='\n \"\"\"\n-\n- # encoded the input (we need a bytes like object)\n- # then, b32encoded the bytes-like object\n- return base64.b32encode(string.encode(\"utf-8\"))\n+ binary_data = \"\".join(bin(ord(d))[2:].zfill(8) for d in data.decode(\"utf-8\"))\n+ binary_data = binary_data.ljust(5 * ((len(binary_data) // 5) + 1), \"0\")\n+ b32_chunks = map(\"\".join, zip(*[iter(binary_data)] * 5))\n+ b32_result = \"\".join(B32_CHARSET[int(chunk, 2)] for chunk in b32_chunks)\n+ return bytes(b32_result.ljust(8 * ((len(b32_result) // 8) + 1), \"=\"), \"utf-8\")\n \n \n-def base32_decode(encoded_bytes: bytes) -> str:\n+def base32_decode(data: bytes) -> bytes:\n \"\"\"\n- Decodes a given bytes-like object to a string, returning a string\n >>> base32_decode(b'JBSWY3DPEBLW64TMMQQQ====')\n- 'Hello World!'\n+ b'Hello World!'\n >>> base32_decode(b'GEZDGNBVGY======')\n- '123456'\n+ b'123456'\n >>> base32_decode(b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=')\n- 'some long complex string'\n+ b'some long complex string'\n \"\"\"\n-\n- # decode the bytes from base32\n- # then, decode the bytes-like object to return as a string\n- return base64.b32decode(encoded_bytes).decode(\"utf-8\")\n+ binary_chunks = \"\".join(\n+ bin(B32_CHARSET.index(_d))[2:].zfill(5)\n+ for _d in data.decode(\"utf-8\").strip(\"=\")\n+ )\n+ binary_data = list(map(\"\".join, zip(*[iter(binary_chunks)] * 8)))\n+ return bytes(\"\".join([chr(int(_d, 2)) for _d in binary_data]), \"utf-8\")\n \n \n if __name__ == \"__main__\":\n- test = \"Hello World!\"\n- encoded = base32_encode(test)\n- print(encoded)\n+ import doctest\n \n- decoded = base32_decode(encoded)\n- print(decoded)\n+ doctest.testmod()\n", "issue": "Delete base85 algorithm\n### Describe your change:\r\nRe #6216\r\n\r\nNormally, I'm not in favour of just deleting algorithms, but I would make the argument that this is not an algorithm, rather just a snippet of code that utilises another library.\r\n\r\nPer `CONTRIBTUING.md`\r\n> Algorithms in this repo should not be how-to examples for existing Python packages. Instead, they should perform internal calculations or manipulations to convert input values into different output values\r\nThis `base85` algorithm has essentially got two lines of code that purely utilise a singular library. 
The doctests only test an external library\r\n\r\nThis repository should not contains examples on how to use a certain library, that would be the library documentation here\r\nhttps://docs.python.org/3/library/base64.html\r\n\r\n\r\n* [ ] Add an algorithm?\r\n* [ ] Fix a bug or typo in an existing algorithm?\r\n* [ ] Documentation change?\r\n* [x] Delete an algorithm\r\n\r\n### Checklist:\r\n* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).\r\n* [x] This pull request is all my own work -- I have not plagiarized.\r\n* [x] I know that pull requests will not be merged if they fail the automated tests.\r\n* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.\r\n* [x] All new Python files are placed inside an existing directory.\r\n* [x] All filenames are in all lowercase characters with no spaces or dashes.\r\n* [x] All functions and variable names follow Python naming conventions.\r\n* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).\r\n* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.\r\n* [x] All new algorithms include at least one URL that points to Wikipedia or another similar explanation.\r\n* [x] If this pull request resolves one or more open issues then the description above includes the issue number(s) with a [closing keyword](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue): \"Fixes #ISSUE-NUMBER\".\r\n\n", "before_files": [{"content": "import base64\n\n\ndef base32_encode(string: str) -> bytes:\n \"\"\"\n Encodes a given string to base32, returning a bytes-like object\n >>> base32_encode(\"Hello World!\")\n b'JBSWY3DPEBLW64TMMQQQ===='\n >>> base32_encode(\"123456\")\n b'GEZDGNBVGY======'\n >>> base32_encode(\"some long complex string\")\n b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY='\n \"\"\"\n\n # encoded the input (we need a bytes like object)\n # then, b32encoded the bytes-like object\n return base64.b32encode(string.encode(\"utf-8\"))\n\n\ndef base32_decode(encoded_bytes: bytes) -> str:\n \"\"\"\n Decodes a given bytes-like object to a string, returning a string\n >>> base32_decode(b'JBSWY3DPEBLW64TMMQQQ====')\n 'Hello World!'\n >>> base32_decode(b'GEZDGNBVGY======')\n '123456'\n >>> base32_decode(b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=')\n 'some long complex string'\n \"\"\"\n\n # decode the bytes from base32\n # then, decode the bytes-like object to return as a string\n return base64.b32decode(encoded_bytes).decode(\"utf-8\")\n\n\nif __name__ == \"__main__\":\n test = \"Hello World!\"\n encoded = base32_encode(test)\n print(encoded)\n\n decoded = base32_decode(encoded)\n print(decoded)\n", "path": "ciphers/base32.py"}], "after_files": [{"content": "\"\"\"\nBase32 encoding and decoding\n\nhttps://en.wikipedia.org/wiki/Base32\n\"\"\"\nB32_CHARSET = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567\"\n\n\ndef base32_encode(data: bytes) -> bytes:\n \"\"\"\n >>> base32_encode(b\"Hello World!\")\n b'JBSWY3DPEBLW64TMMQQQ===='\n >>> base32_encode(b\"123456\")\n b'GEZDGNBVGY======'\n >>> base32_encode(b\"some long complex string\")\n b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY='\n \"\"\"\n binary_data = \"\".join(bin(ord(d))[2:].zfill(8) for d in data.decode(\"utf-8\"))\n binary_data = binary_data.ljust(5 * ((len(binary_data) // 5) + 1), \"0\")\n b32_chunks = map(\"\".join, 
zip(*[iter(binary_data)] * 5))\n b32_result = \"\".join(B32_CHARSET[int(chunk, 2)] for chunk in b32_chunks)\n return bytes(b32_result.ljust(8 * ((len(b32_result) // 8) + 1), \"=\"), \"utf-8\")\n\n\ndef base32_decode(data: bytes) -> bytes:\n \"\"\"\n >>> base32_decode(b'JBSWY3DPEBLW64TMMQQQ====')\n b'Hello World!'\n >>> base32_decode(b'GEZDGNBVGY======')\n b'123456'\n >>> base32_decode(b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=')\n b'some long complex string'\n \"\"\"\n binary_chunks = \"\".join(\n bin(B32_CHARSET.index(_d))[2:].zfill(5)\n for _d in data.decode(\"utf-8\").strip(\"=\")\n )\n binary_data = list(map(\"\".join, zip(*[iter(binary_chunks)] * 8)))\n return bytes(\"\".join([chr(int(_d, 2)) for _d in binary_data]), \"utf-8\")\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n", "path": "ciphers/base32.py"}]}
1239
855
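The TheAlgorithms record above removes the `base64` dependency by hand-rolling the 5-bit chunking, which makes the stdlib a convenient independent oracle for spot checks. The sketch below restates the patched encoder in condensed form, iterating over raw bytes instead of a decoded string (equivalent for the ASCII doctest samples, an assumption for anything else), and asserts agreement with `base64.b32encode`. One caveat: because the `ljust` always rounds up to the next multiple of five bits, an input whose byte length is itself a multiple of five gains a spurious all-zero chunk, so agreement with the stdlib should not be expected for such lengths; the samples here avoid that edge.

```python
import base64

B32_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"

def base32_encode(data: bytes) -> bytes:
    # 8-bit groups in, 5-bit groups out, '=' padding to an 8-char boundary.
    bits = "".join(bin(b)[2:].zfill(8) for b in data)
    bits = bits.ljust(5 * ((len(bits) // 5) + 1), "0")
    chunks = ("".join(c) for c in zip(*[iter(bits)] * 5))
    out = "".join(B32_CHARSET[int(chunk, 2)] for chunk in chunks)
    return out.ljust(8 * ((len(out) // 8) + 1), "=").encode()

for sample in (b"Hello World!", b"123456"):
    assert base32_encode(sample) == base64.b32encode(sample), sample
print("hand-rolled encoder matches base64.b32encode on the doctest samples")
```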
gh_patches_debug_1834
rasdani/github-patches
git_diff
mozilla__pontoon-3003
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- GetText check fails incorrectly on newline https://pontoon.mozilla.org/en-GB/all-projects/all-resources/?string=286055 If you copy the source string, an extra line is added at the back, and that fails the checks for GetText. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pontoon/translations/forms.py` Content: ``` 1 from django import forms 2 from django.contrib.postgres.forms import SimpleArrayField 3 4 from pontoon.base.models import ( 5 Entity, 6 Locale, 7 ) 8 9 10 class CreateTranslationForm(forms.Form): 11 """ 12 Form for parameters to the `entities` view. 13 """ 14 15 entity = forms.IntegerField() 16 locale = forms.CharField() 17 plural_form = forms.CharField() 18 19 # Some file formats allow empty original strings and translations. 20 # We must allow both here. Validation is handled in pontoon.checks module. 21 original = forms.CharField(required=False) 22 translation = forms.CharField(required=False) 23 24 ignore_warnings = forms.BooleanField(required=False) 25 approve = forms.BooleanField(required=False) 26 force_suggestions = forms.BooleanField(required=False) 27 paths = forms.MultipleChoiceField(required=False) 28 machinery_sources = SimpleArrayField(forms.CharField(max_length=30), required=False) 29 30 def clean_paths(self): 31 try: 32 return self.data.getlist("paths[]") 33 except AttributeError: 34 # If the data source is not a QueryDict, it won't have a `getlist` method. 35 return self.data.get("paths[]") or [] 36 37 def clean_entity(self): 38 try: 39 return Entity.objects.get(pk=self.cleaned_data["entity"]) 40 except Entity.DoesNotExist: 41 raise forms.ValidationError(f"Entity `{self.entity}` could not be found") 42 43 def clean_locale(self): 44 try: 45 return Locale.objects.get(code=self.cleaned_data["locale"]) 46 except Locale.DoesNotExist: 47 raise forms.ValidationError(f"Locale `{self.entity}` could not be found") 48 49 def clean_plural_form(self): 50 if self.cleaned_data["plural_form"] == "-1": 51 return None 52 return self.cleaned_data["plural_form"] 53 54 def clean_translation(self): 55 return self.data.get("translation", "") 56 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pontoon/translations/forms.py b/pontoon/translations/forms.py --- a/pontoon/translations/forms.py +++ b/pontoon/translations/forms.py @@ -51,5 +51,8 @@ return None return self.cleaned_data["plural_form"] + def clean_original(self): + return self.data.get("original", "") + def clean_translation(self): return self.data.get("translation", "")
{"golden_diff": "diff --git a/pontoon/translations/forms.py b/pontoon/translations/forms.py\n--- a/pontoon/translations/forms.py\n+++ b/pontoon/translations/forms.py\n@@ -51,5 +51,8 @@\n return None\n return self.cleaned_data[\"plural_form\"]\n \n+ def clean_original(self):\n+ return self.data.get(\"original\", \"\")\n+\n def clean_translation(self):\n return self.data.get(\"translation\", \"\")\n", "issue": "GetText check fails incorrectly on newline\nhttps://pontoon.mozilla.org/en-GB/all-projects/all-resources/?string=286055\r\n\r\nIf you copy the source string, an extra line is added at the back, and that fails the checks for GetText.\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.postgres.forms import SimpleArrayField\n\nfrom pontoon.base.models import (\n Entity,\n Locale,\n)\n\n\nclass CreateTranslationForm(forms.Form):\n \"\"\"\n Form for parameters to the `entities` view.\n \"\"\"\n\n entity = forms.IntegerField()\n locale = forms.CharField()\n plural_form = forms.CharField()\n\n # Some file formats allow empty original strings and translations.\n # We must allow both here. Validation is handled in pontoon.checks module.\n original = forms.CharField(required=False)\n translation = forms.CharField(required=False)\n\n ignore_warnings = forms.BooleanField(required=False)\n approve = forms.BooleanField(required=False)\n force_suggestions = forms.BooleanField(required=False)\n paths = forms.MultipleChoiceField(required=False)\n machinery_sources = SimpleArrayField(forms.CharField(max_length=30), required=False)\n\n def clean_paths(self):\n try:\n return self.data.getlist(\"paths[]\")\n except AttributeError:\n # If the data source is not a QueryDict, it won't have a `getlist` method.\n return self.data.get(\"paths[]\") or []\n\n def clean_entity(self):\n try:\n return Entity.objects.get(pk=self.cleaned_data[\"entity\"])\n except Entity.DoesNotExist:\n raise forms.ValidationError(f\"Entity `{self.entity}` could not be found\")\n\n def clean_locale(self):\n try:\n return Locale.objects.get(code=self.cleaned_data[\"locale\"])\n except Locale.DoesNotExist:\n raise forms.ValidationError(f\"Locale `{self.entity}` could not be found\")\n\n def clean_plural_form(self):\n if self.cleaned_data[\"plural_form\"] == \"-1\":\n return None\n return self.cleaned_data[\"plural_form\"]\n\n def clean_translation(self):\n return self.data.get(\"translation\", \"\")\n", "path": "pontoon/translations/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.contrib.postgres.forms import SimpleArrayField\n\nfrom pontoon.base.models import (\n Entity,\n Locale,\n)\n\n\nclass CreateTranslationForm(forms.Form):\n \"\"\"\n Form for parameters to the `entities` view.\n \"\"\"\n\n entity = forms.IntegerField()\n locale = forms.CharField()\n plural_form = forms.CharField()\n\n # Some file formats allow empty original strings and translations.\n # We must allow both here. 
Validation is handled in pontoon.checks module.\n original = forms.CharField(required=False)\n translation = forms.CharField(required=False)\n\n ignore_warnings = forms.BooleanField(required=False)\n approve = forms.BooleanField(required=False)\n force_suggestions = forms.BooleanField(required=False)\n paths = forms.MultipleChoiceField(required=False)\n machinery_sources = SimpleArrayField(forms.CharField(max_length=30), required=False)\n\n def clean_paths(self):\n try:\n return self.data.getlist(\"paths[]\")\n except AttributeError:\n # If the data source is not a QueryDict, it won't have a `getlist` method.\n return self.data.get(\"paths[]\") or []\n\n def clean_entity(self):\n try:\n return Entity.objects.get(pk=self.cleaned_data[\"entity\"])\n except Entity.DoesNotExist:\n raise forms.ValidationError(f\"Entity `{self.entity}` could not be found\")\n\n def clean_locale(self):\n try:\n return Locale.objects.get(code=self.cleaned_data[\"locale\"])\n except Locale.DoesNotExist:\n raise forms.ValidationError(f\"Locale `{self.entity}` could not be found\")\n\n def clean_plural_form(self):\n if self.cleaned_data[\"plural_form\"] == \"-1\":\n return None\n return self.cleaned_data[\"plural_form\"]\n\n def clean_original(self):\n return self.data.get(\"original\", \"\")\n\n def clean_translation(self):\n return self.data.get(\"translation\", \"\")\n", "path": "pontoon/translations/forms.py"}]}
795
99
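A minimal, self-contained sketch of the behaviour the `clean_original` override in the diff above addresses: Django's `forms.CharField` strips surrounding whitespace by default, so a trailing newline survives in the raw `form.data` but not in `cleaned_data`. The `Demo` form and its field value are assumptions for illustration, and a recent Django is assumed.

```python
import django
from django.conf import settings

settings.configure()  # minimal config so the form machinery runs standalone
django.setup()

from django import forms

class Demo(forms.Form):  # hypothetical stand-in for CreateTranslationForm
    original = forms.CharField(required=False)  # strip=True is the default

form = Demo(data={"original": "Hello\n"})
form.is_valid()
print(repr(form.cleaned_data["original"]))  # 'Hello'   -- trailing newline stripped
print(repr(form.data.get("original", "")))  # 'Hello\n' -- raw value, as the fix returns
```

Returning `self.data.get("original", "")` keeps the source string byte-for-byte, so the GetText newline check compares like with like, mirroring what `clean_translation` already did for the translation field.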
gh_patches_debug_8461
rasdani/github-patches
git_diff
deepset-ai__haystack-7603
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add e2e tests for pipeline evaluation for 2.x Test eval for 2.x with tiny dataset, check dataframes, isolated, integrated eval (migrate existing integration tests to e2e tests for 2.x) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `haystack/evaluation/eval_run_result.py` Content: ``` 1 from abc import ABC, abstractmethod 2 from copy import deepcopy 3 from typing import Any, Dict, List 4 from warnings import warn 5 6 from pandas import DataFrame 7 from pandas import concat as pd_concat 8 9 10 class BaseEvaluationRunResult(ABC): 11 """ 12 Represents the results of an evaluation run. 13 """ 14 15 @abstractmethod 16 def to_pandas(self) -> "DataFrame": 17 """ 18 Creates a Pandas DataFrame containing the scores of each metric for every input sample. 19 20 :returns: 21 Pandas DataFrame with the scores. 22 """ 23 24 @abstractmethod 25 def score_report(self) -> "DataFrame": 26 """ 27 Transforms the results into a Pandas DataFrame with the aggregated scores for each metric. 28 29 :returns: 30 Pandas DataFrame with the aggregated scores. 31 """ 32 33 @abstractmethod 34 def comparative_individual_scores_report(self, other: "BaseEvaluationRunResult") -> "DataFrame": 35 """ 36 Creates a Pandas DataFrame with the scores for each metric in the results of two different evaluation runs. 37 38 The inputs to both evaluation runs is assumed to be the same. 39 40 :param other: 41 Results of another evaluation run to compare with. 42 :returns: 43 Pandas DataFrame with the score comparison. 44 """ 45 46 47 class EvaluationRunResult(BaseEvaluationRunResult): 48 """ 49 Contains the inputs and the outputs of an evaluation pipeline and provides methods to inspect them. 50 """ 51 52 def __init__(self, run_name: str, inputs: Dict[str, List[Any]], results: Dict[str, Dict[str, Any]]): 53 """ 54 Initialize a new evaluation run result. 55 56 :param run_name: 57 Name of the evaluation run. 58 :param inputs: 59 Dictionary containing the inputs used for the run. 60 Each key is the name of the input and its value is 61 a list of input values. The length of the lists should 62 be the same. 63 :param results: 64 Dictionary containing the results of the evaluators 65 used in the evaluation pipeline. Each key is the name 66 of the metric and its value is dictionary with the following 67 keys: 68 - 'score': The aggregated score for the metric. 69 - 'individual_scores': A list of scores for each input sample. 70 """ 71 self.run_name = run_name 72 self.inputs = deepcopy(inputs) 73 self.results = deepcopy(results) 74 75 if len(inputs) == 0: 76 raise ValueError("No inputs provided.") 77 if len({len(l) for l in inputs.values()}) != 1: 78 raise ValueError("Lengths of the inputs should be the same.") 79 80 expected_len = len(next(iter(inputs.values()))) 81 82 for metric, outputs in results.items(): 83 if "score" not in outputs: 84 raise ValueError(f"Aggregate score missing for {metric}.") 85 if "individual_scores" not in outputs: 86 raise ValueError(f"Individual scores missing for {metric}.") 87 88 if len(outputs["individual_scores"]) != expected_len: 89 raise ValueError( 90 f"Length of individual scores for '{metric}' should be the same as the inputs. " 91 f"Got {len(outputs['individual_scores'])} but expected {expected_len}." 
92 ) 93 94 def score_report(self) -> DataFrame: # noqa: D102 95 results = {k: v["score"] for k, v in self.results.items()} 96 return DataFrame.from_dict(results, orient="index", columns=["score"]) 97 98 def to_pandas(self) -> DataFrame: # noqa: D102 99 inputs_columns = list(self.inputs.keys()) 100 inputs_values = list(self.inputs.values()) 101 inputs_values = list(map(list, zip(*inputs_values))) # transpose the values 102 df_inputs = DataFrame(inputs_values, columns=inputs_columns) 103 104 scores_columns = list(self.results.keys()) 105 scores_values = [v["individual_scores"] for v in self.results.values()] 106 scores_values = list(map(list, zip(*scores_values))) # transpose the values 107 df_scores = DataFrame(scores_values, columns=scores_columns) 108 109 return df_inputs.join(df_scores) 110 111 def comparative_individual_scores_report(self, other: "BaseEvaluationRunResult") -> DataFrame: # noqa: D102 112 if not isinstance(other, EvaluationRunResult): 113 raise ValueError("Comparative scores can only be computed between EvaluationRunResults.") 114 115 this_name = self.run_name 116 other_name = other.run_name 117 if this_name == other_name: 118 warn(f"The run names of the two evaluation results are the same ('{this_name}')") 119 this_name = f"{this_name}_first" 120 other_name = f"{other_name}_second" 121 122 if self.inputs != other.inputs: 123 warn(f"The inputs to the two evaluation results differ; using the inputs of '{this_name}'.") 124 125 pipe_a_df = self.to_pandas() 126 pipe_b_df = other.to_pandas() 127 128 ignore = list(self.inputs.keys()) 129 pipe_b_df.drop(columns=ignore, inplace=True, errors="ignore") 130 pipe_b_df.columns = [f"{other_name}_{column}" for column in pipe_b_df.columns] # type: ignore 131 pipe_a_df.columns = [ 132 f"{this_name}_{col}" if col not in ignore else col for col in pipe_a_df.columns 133 ] # type: ignore 134 135 results_df = pd_concat([pipe_a_df, pipe_b_df], axis=1) 136 return results_df 137 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/haystack/evaluation/eval_run_result.py b/haystack/evaluation/eval_run_result.py --- a/haystack/evaluation/eval_run_result.py +++ b/haystack/evaluation/eval_run_result.py @@ -119,8 +119,8 @@ this_name = f"{this_name}_first" other_name = f"{other_name}_second" - if self.inputs != other.inputs: - warn(f"The inputs to the two evaluation results differ; using the inputs of '{this_name}'.") + if self.inputs.keys() != other.inputs.keys(): + warn(f"The input columns differ between the results; using the input columns of '{this_name}'.") pipe_a_df = self.to_pandas() pipe_b_df = other.to_pandas()
{"golden_diff": "diff --git a/haystack/evaluation/eval_run_result.py b/haystack/evaluation/eval_run_result.py\n--- a/haystack/evaluation/eval_run_result.py\n+++ b/haystack/evaluation/eval_run_result.py\n@@ -119,8 +119,8 @@\n this_name = f\"{this_name}_first\"\n other_name = f\"{other_name}_second\"\n \n- if self.inputs != other.inputs:\n- warn(f\"The inputs to the two evaluation results differ; using the inputs of '{this_name}'.\")\n+ if self.inputs.keys() != other.inputs.keys():\n+ warn(f\"The input columns differ between the results; using the input columns of '{this_name}'.\")\n \n pipe_a_df = self.to_pandas()\n pipe_b_df = other.to_pandas()\n", "issue": "Add e2e tests for pipeline evaluation for 2.x\nTest eval for 2.x with tiny dataset, check dataframes, isolated, integrated eval (migrate existing integration tests to e2e tests for 2.x)\n", "before_files": [{"content": "from abc import ABC, abstractmethod\nfrom copy import deepcopy\nfrom typing import Any, Dict, List\nfrom warnings import warn\n\nfrom pandas import DataFrame\nfrom pandas import concat as pd_concat\n\n\nclass BaseEvaluationRunResult(ABC):\n \"\"\"\n Represents the results of an evaluation run.\n \"\"\"\n\n @abstractmethod\n def to_pandas(self) -> \"DataFrame\":\n \"\"\"\n Creates a Pandas DataFrame containing the scores of each metric for every input sample.\n\n :returns:\n Pandas DataFrame with the scores.\n \"\"\"\n\n @abstractmethod\n def score_report(self) -> \"DataFrame\":\n \"\"\"\n Transforms the results into a Pandas DataFrame with the aggregated scores for each metric.\n\n :returns:\n Pandas DataFrame with the aggregated scores.\n \"\"\"\n\n @abstractmethod\n def comparative_individual_scores_report(self, other: \"BaseEvaluationRunResult\") -> \"DataFrame\":\n \"\"\"\n Creates a Pandas DataFrame with the scores for each metric in the results of two different evaluation runs.\n\n The inputs to both evaluation runs is assumed to be the same.\n\n :param other:\n Results of another evaluation run to compare with.\n :returns:\n Pandas DataFrame with the score comparison.\n \"\"\"\n\n\nclass EvaluationRunResult(BaseEvaluationRunResult):\n \"\"\"\n Contains the inputs and the outputs of an evaluation pipeline and provides methods to inspect them.\n \"\"\"\n\n def __init__(self, run_name: str, inputs: Dict[str, List[Any]], results: Dict[str, Dict[str, Any]]):\n \"\"\"\n Initialize a new evaluation run result.\n\n :param run_name:\n Name of the evaluation run.\n :param inputs:\n Dictionary containing the inputs used for the run.\n Each key is the name of the input and its value is\n a list of input values. The length of the lists should\n be the same.\n :param results:\n Dictionary containing the results of the evaluators\n used in the evaluation pipeline. 
Each key is the name\n of the metric and its value is dictionary with the following\n keys:\n - 'score': The aggregated score for the metric.\n - 'individual_scores': A list of scores for each input sample.\n \"\"\"\n self.run_name = run_name\n self.inputs = deepcopy(inputs)\n self.results = deepcopy(results)\n\n if len(inputs) == 0:\n raise ValueError(\"No inputs provided.\")\n if len({len(l) for l in inputs.values()}) != 1:\n raise ValueError(\"Lengths of the inputs should be the same.\")\n\n expected_len = len(next(iter(inputs.values())))\n\n for metric, outputs in results.items():\n if \"score\" not in outputs:\n raise ValueError(f\"Aggregate score missing for {metric}.\")\n if \"individual_scores\" not in outputs:\n raise ValueError(f\"Individual scores missing for {metric}.\")\n\n if len(outputs[\"individual_scores\"]) != expected_len:\n raise ValueError(\n f\"Length of individual scores for '{metric}' should be the same as the inputs. \"\n f\"Got {len(outputs['individual_scores'])} but expected {expected_len}.\"\n )\n\n def score_report(self) -> DataFrame: # noqa: D102\n results = {k: v[\"score\"] for k, v in self.results.items()}\n return DataFrame.from_dict(results, orient=\"index\", columns=[\"score\"])\n\n def to_pandas(self) -> DataFrame: # noqa: D102\n inputs_columns = list(self.inputs.keys())\n inputs_values = list(self.inputs.values())\n inputs_values = list(map(list, zip(*inputs_values))) # transpose the values\n df_inputs = DataFrame(inputs_values, columns=inputs_columns)\n\n scores_columns = list(self.results.keys())\n scores_values = [v[\"individual_scores\"] for v in self.results.values()]\n scores_values = list(map(list, zip(*scores_values))) # transpose the values\n df_scores = DataFrame(scores_values, columns=scores_columns)\n\n return df_inputs.join(df_scores)\n\n def comparative_individual_scores_report(self, other: \"BaseEvaluationRunResult\") -> DataFrame: # noqa: D102\n if not isinstance(other, EvaluationRunResult):\n raise ValueError(\"Comparative scores can only be computed between EvaluationRunResults.\")\n\n this_name = self.run_name\n other_name = other.run_name\n if this_name == other_name:\n warn(f\"The run names of the two evaluation results are the same ('{this_name}')\")\n this_name = f\"{this_name}_first\"\n other_name = f\"{other_name}_second\"\n\n if self.inputs != other.inputs:\n warn(f\"The inputs to the two evaluation results differ; using the inputs of '{this_name}'.\")\n\n pipe_a_df = self.to_pandas()\n pipe_b_df = other.to_pandas()\n\n ignore = list(self.inputs.keys())\n pipe_b_df.drop(columns=ignore, inplace=True, errors=\"ignore\")\n pipe_b_df.columns = [f\"{other_name}_{column}\" for column in pipe_b_df.columns] # type: ignore\n pipe_a_df.columns = [\n f\"{this_name}_{col}\" if col not in ignore else col for col in pipe_a_df.columns\n ] # type: ignore\n\n results_df = pd_concat([pipe_a_df, pipe_b_df], axis=1)\n return results_df\n", "path": "haystack/evaluation/eval_run_result.py"}], "after_files": [{"content": "from abc import ABC, abstractmethod\nfrom copy import deepcopy\nfrom typing import Any, Dict, List\nfrom warnings import warn\n\nfrom pandas import DataFrame\nfrom pandas import concat as pd_concat\n\n\nclass BaseEvaluationRunResult(ABC):\n \"\"\"\n Represents the results of an evaluation run.\n \"\"\"\n\n @abstractmethod\n def to_pandas(self) -> \"DataFrame\":\n \"\"\"\n Creates a Pandas DataFrame containing the scores of each metric for every input sample.\n\n :returns:\n Pandas DataFrame with the scores.\n \"\"\"\n\n 
@abstractmethod\n def score_report(self) -> \"DataFrame\":\n \"\"\"\n Transforms the results into a Pandas DataFrame with the aggregated scores for each metric.\n\n :returns:\n Pandas DataFrame with the aggregated scores.\n \"\"\"\n\n @abstractmethod\n def comparative_individual_scores_report(self, other: \"BaseEvaluationRunResult\") -> \"DataFrame\":\n \"\"\"\n Creates a Pandas DataFrame with the scores for each metric in the results of two different evaluation runs.\n\n The inputs to both evaluation runs is assumed to be the same.\n\n :param other:\n Results of another evaluation run to compare with.\n :returns:\n Pandas DataFrame with the score comparison.\n \"\"\"\n\n\nclass EvaluationRunResult(BaseEvaluationRunResult):\n \"\"\"\n Contains the inputs and the outputs of an evaluation pipeline and provides methods to inspect them.\n \"\"\"\n\n def __init__(self, run_name: str, inputs: Dict[str, List[Any]], results: Dict[str, Dict[str, Any]]):\n \"\"\"\n Initialize a new evaluation run result.\n\n :param run_name:\n Name of the evaluation run.\n :param inputs:\n Dictionary containing the inputs used for the run.\n Each key is the name of the input and its value is\n a list of input values. The length of the lists should\n be the same.\n :param results:\n Dictionary containing the results of the evaluators\n used in the evaluation pipeline. Each key is the name\n of the metric and its value is dictionary with the following\n keys:\n - 'score': The aggregated score for the metric.\n - 'individual_scores': A list of scores for each input sample.\n \"\"\"\n self.run_name = run_name\n self.inputs = deepcopy(inputs)\n self.results = deepcopy(results)\n\n if len(inputs) == 0:\n raise ValueError(\"No inputs provided.\")\n if len({len(l) for l in inputs.values()}) != 1:\n raise ValueError(\"Lengths of the inputs should be the same.\")\n\n expected_len = len(next(iter(inputs.values())))\n\n for metric, outputs in results.items():\n if \"score\" not in outputs:\n raise ValueError(f\"Aggregate score missing for {metric}.\")\n if \"individual_scores\" not in outputs:\n raise ValueError(f\"Individual scores missing for {metric}.\")\n\n if len(outputs[\"individual_scores\"]) != expected_len:\n raise ValueError(\n f\"Length of individual scores for '{metric}' should be the same as the inputs. 
\"\n f\"Got {len(outputs['individual_scores'])} but expected {expected_len}.\"\n )\n\n def score_report(self) -> DataFrame: # noqa: D102\n results = {k: v[\"score\"] for k, v in self.results.items()}\n return DataFrame.from_dict(results, orient=\"index\", columns=[\"score\"])\n\n def to_pandas(self) -> DataFrame: # noqa: D102\n inputs_columns = list(self.inputs.keys())\n inputs_values = list(self.inputs.values())\n inputs_values = list(map(list, zip(*inputs_values))) # transpose the values\n df_inputs = DataFrame(inputs_values, columns=inputs_columns)\n\n scores_columns = list(self.results.keys())\n scores_values = [v[\"individual_scores\"] for v in self.results.values()]\n scores_values = list(map(list, zip(*scores_values))) # transpose the values\n df_scores = DataFrame(scores_values, columns=scores_columns)\n\n return df_inputs.join(df_scores)\n\n def comparative_individual_scores_report(self, other: \"BaseEvaluationRunResult\") -> DataFrame: # noqa: D102\n if not isinstance(other, EvaluationRunResult):\n raise ValueError(\"Comparative scores can only be computed between EvaluationRunResults.\")\n\n this_name = self.run_name\n other_name = other.run_name\n if this_name == other_name:\n warn(f\"The run names of the two evaluation results are the same ('{this_name}')\")\n this_name = f\"{this_name}_first\"\n other_name = f\"{other_name}_second\"\n\n if self.inputs.keys() != other.inputs.keys():\n warn(f\"The input columns differ between the results; using the input columns of '{this_name}'.\")\n\n pipe_a_df = self.to_pandas()\n pipe_b_df = other.to_pandas()\n\n ignore = list(self.inputs.keys())\n pipe_b_df.drop(columns=ignore, inplace=True, errors=\"ignore\")\n pipe_b_df.columns = [f\"{other_name}_{column}\" for column in pipe_b_df.columns] # type: ignore\n pipe_a_df.columns = [\n f\"{this_name}_{col}\" if col not in ignore else col for col in pipe_a_df.columns\n ] # type: ignore\n\n results_df = pd_concat([pipe_a_df, pipe_b_df], axis=1)\n return results_df\n", "path": "haystack/evaluation/eval_run_result.py"}]}
1758
175
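An illustrative sketch of the behavioural change in the diff above: the old `self.inputs != other.inputs` check warned whenever any input value differed, while the patched `self.inputs.keys() != other.inputs.keys()` only warns when the input columns differ. Plain dicts stand in for `EvaluationRunResult.inputs`; the values are made up.

```python
# Same input columns, different values: the case the patch stops warning on.
inputs_a = {"question": ["q1", "q2"], "context": ["c1", "c2"]}
inputs_b = {"question": ["q1", "q2"], "context": ["other c1", "other c2"]}

print(inputs_a != inputs_b)                # True  -> the old check warned here
print(inputs_a.keys() != inputs_b.keys())  # False -> the patched check stays quiet
```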
gh_patches_debug_13129
rasdani/github-patches
git_diff
liqd__a4-meinberlin-613
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- datetimepicker unique ids <img width="980" alt="screen shot 2017-06-01 at 15 59 21" src="https://cloud.githubusercontent.com/assets/16354712/26683219/61f8eb4c-46e3-11e7-851b-148bff927bd4.png"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `apps/contrib/widgets.py` Content: ``` 1 import datetime 2 from itertools import chain 3 4 import django_filters 5 from django.contrib.staticfiles.storage import staticfiles_storage 6 from django.db.models.fields import BLANK_CHOICE_DASH 7 from django.forms import widgets as form_widgets 8 from django.forms.widgets import flatatt 9 from django.template.loader import render_to_string 10 from django.utils import formats 11 from django.utils.timezone import localtime 12 from django.utils.translation import ugettext as _ 13 14 15 class DropdownLinkWidget(django_filters.widgets.LinkWidget): 16 label = None 17 right = False 18 template = 'meinberlin_contrib/widgets/dropdown_link.html' 19 20 def get_option_label(self, value, choices=()): 21 option_label = BLANK_CHOICE_DASH[0][1] 22 23 for v, label in chain(self.choices, choices): 24 if str(v) == value: 25 option_label = label 26 break 27 28 if option_label == BLANK_CHOICE_DASH[0][1]: 29 option_label = _('All') 30 31 return option_label 32 33 def render(self, name, value, attrs=None, choices=()): 34 all_choices = list(chain(self.choices, choices)) 35 36 if len(all_choices) <= 1: 37 return '' 38 39 if value is None: 40 value = all_choices[0][0] 41 42 _id = attrs.pop('id') 43 final_attrs = flatatt(self.build_attrs(attrs)) 44 value_label = self.get_option_label(value, choices=choices) 45 46 options = super().render(name, value, attrs={ 47 'class': 'dropdown-menu', 48 'aria-labelledby': _id, 49 }, choices=choices) 50 51 return render_to_string(self.template, { 52 'options': options, 53 'id': _id, 54 'attrs': final_attrs, 55 'value_label': value_label, 56 'label': self.label, 57 'right': self.right, 58 }) 59 60 61 class DateTimeInput(form_widgets.SplitDateTimeWidget): 62 class Media: 63 js = ( 64 staticfiles_storage.url('datepicker.js'), 65 ) 66 css = {'all': [ 67 staticfiles_storage.url('datepicker.css'), 68 ]} 69 70 def render(self, name, value, attrs=None): 71 date_attrs = self.build_attrs(attrs) 72 date_attrs.update({ 73 'class': 'datepicker', 74 'placeholder': formats.localize_input(datetime.date.today()) 75 }) 76 time_attrs = self.build_attrs(attrs) 77 time_attrs.update({ 78 'class': 'timepicker', 79 'placeholder': '00:00', 80 }) 81 82 if isinstance(value, datetime.datetime): 83 value = localtime(value) 84 date = value.date() 85 time = value.time() 86 else: 87 # value's just a list in case of an error 88 date = value[0] if value else None 89 time = value[1] if value else '00:00' 90 91 return render_to_string('datetime_input.html', { 92 'date': self.widgets[0].render( 93 name + '_0', 94 date, 95 date_attrs 96 ), 97 'time': self.widgets[1].render( 98 name + '_1', 99 time, 100 time_attrs 101 ) 102 }) 103 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/apps/contrib/widgets.py b/apps/contrib/widgets.py --- a/apps/contrib/widgets.py +++ b/apps/contrib/widgets.py @@ -71,12 +71,14 @@ date_attrs = self.build_attrs(attrs) date_attrs.update({ 'class': 'datepicker', - 'placeholder': formats.localize_input(datetime.date.today()) + 'placeholder': formats.localize_input(datetime.date.today()), + 'id': name + '_date' }) time_attrs = self.build_attrs(attrs) time_attrs.update({ 'class': 'timepicker', 'placeholder': '00:00', + 'id': name + '_time' }) if isinstance(value, datetime.datetime):
{"golden_diff": "diff --git a/apps/contrib/widgets.py b/apps/contrib/widgets.py\n--- a/apps/contrib/widgets.py\n+++ b/apps/contrib/widgets.py\n@@ -71,12 +71,14 @@\n date_attrs = self.build_attrs(attrs)\n date_attrs.update({\n 'class': 'datepicker',\n- 'placeholder': formats.localize_input(datetime.date.today())\n+ 'placeholder': formats.localize_input(datetime.date.today()),\n+ 'id': name + '_date'\n })\n time_attrs = self.build_attrs(attrs)\n time_attrs.update({\n 'class': 'timepicker',\n 'placeholder': '00:00',\n+ 'id': name + '_time'\n })\n \n if isinstance(value, datetime.datetime):\n", "issue": "datetimepicker unique ids\n<img width=\"980\" alt=\"screen shot 2017-06-01 at 15 59 21\" src=\"https://cloud.githubusercontent.com/assets/16354712/26683219/61f8eb4c-46e3-11e7-851b-148bff927bd4.png\">\r\n\n", "before_files": [{"content": "import datetime\nfrom itertools import chain\n\nimport django_filters\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.db.models.fields import BLANK_CHOICE_DASH\nfrom django.forms import widgets as form_widgets\nfrom django.forms.widgets import flatatt\nfrom django.template.loader import render_to_string\nfrom django.utils import formats\nfrom django.utils.timezone import localtime\nfrom django.utils.translation import ugettext as _\n\n\nclass DropdownLinkWidget(django_filters.widgets.LinkWidget):\n label = None\n right = False\n template = 'meinberlin_contrib/widgets/dropdown_link.html'\n\n def get_option_label(self, value, choices=()):\n option_label = BLANK_CHOICE_DASH[0][1]\n\n for v, label in chain(self.choices, choices):\n if str(v) == value:\n option_label = label\n break\n\n if option_label == BLANK_CHOICE_DASH[0][1]:\n option_label = _('All')\n\n return option_label\n\n def render(self, name, value, attrs=None, choices=()):\n all_choices = list(chain(self.choices, choices))\n\n if len(all_choices) <= 1:\n return ''\n\n if value is None:\n value = all_choices[0][0]\n\n _id = attrs.pop('id')\n final_attrs = flatatt(self.build_attrs(attrs))\n value_label = self.get_option_label(value, choices=choices)\n\n options = super().render(name, value, attrs={\n 'class': 'dropdown-menu',\n 'aria-labelledby': _id,\n }, choices=choices)\n\n return render_to_string(self.template, {\n 'options': options,\n 'id': _id,\n 'attrs': final_attrs,\n 'value_label': value_label,\n 'label': self.label,\n 'right': self.right,\n })\n\n\nclass DateTimeInput(form_widgets.SplitDateTimeWidget):\n class Media:\n js = (\n staticfiles_storage.url('datepicker.js'),\n )\n css = {'all': [\n staticfiles_storage.url('datepicker.css'),\n ]}\n\n def render(self, name, value, attrs=None):\n date_attrs = self.build_attrs(attrs)\n date_attrs.update({\n 'class': 'datepicker',\n 'placeholder': formats.localize_input(datetime.date.today())\n })\n time_attrs = self.build_attrs(attrs)\n time_attrs.update({\n 'class': 'timepicker',\n 'placeholder': '00:00',\n })\n\n if isinstance(value, datetime.datetime):\n value = localtime(value)\n date = value.date()\n time = value.time()\n else:\n # value's just a list in case of an error\n date = value[0] if value else None\n time = value[1] if value else '00:00'\n\n return render_to_string('datetime_input.html', {\n 'date': self.widgets[0].render(\n name + '_0',\n date,\n date_attrs\n ),\n 'time': self.widgets[1].render(\n name + '_1',\n time,\n time_attrs\n )\n })\n", "path": "apps/contrib/widgets.py"}], "after_files": [{"content": "import datetime\nfrom itertools import chain\n\nimport django_filters\nfrom 
django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.db.models.fields import BLANK_CHOICE_DASH\nfrom django.forms import widgets as form_widgets\nfrom django.forms.widgets import flatatt\nfrom django.template.loader import render_to_string\nfrom django.utils import formats\nfrom django.utils.timezone import localtime\nfrom django.utils.translation import ugettext as _\n\n\nclass DropdownLinkWidget(django_filters.widgets.LinkWidget):\n label = None\n right = False\n template = 'meinberlin_contrib/widgets/dropdown_link.html'\n\n def get_option_label(self, value, choices=()):\n option_label = BLANK_CHOICE_DASH[0][1]\n\n for v, label in chain(self.choices, choices):\n if str(v) == value:\n option_label = label\n break\n\n if option_label == BLANK_CHOICE_DASH[0][1]:\n option_label = _('All')\n\n return option_label\n\n def render(self, name, value, attrs=None, choices=()):\n all_choices = list(chain(self.choices, choices))\n\n if len(all_choices) <= 1:\n return ''\n\n if value is None:\n value = all_choices[0][0]\n\n _id = attrs.pop('id')\n final_attrs = flatatt(self.build_attrs(attrs))\n value_label = self.get_option_label(value, choices=choices)\n\n options = super().render(name, value, attrs={\n 'class': 'dropdown-menu',\n 'aria-labelledby': _id,\n }, choices=choices)\n\n return render_to_string(self.template, {\n 'options': options,\n 'id': _id,\n 'attrs': final_attrs,\n 'value_label': value_label,\n 'label': self.label,\n 'right': self.right,\n })\n\n\nclass DateTimeInput(form_widgets.SplitDateTimeWidget):\n class Media:\n js = (\n staticfiles_storage.url('datepicker.js'),\n )\n css = {'all': [\n staticfiles_storage.url('datepicker.css'),\n ]}\n\n def render(self, name, value, attrs=None):\n date_attrs = self.build_attrs(attrs)\n date_attrs.update({\n 'class': 'datepicker',\n 'placeholder': formats.localize_input(datetime.date.today()),\n 'id': name + '_date'\n })\n time_attrs = self.build_attrs(attrs)\n time_attrs.update({\n 'class': 'timepicker',\n 'placeholder': '00:00',\n 'id': name + '_time'\n })\n\n if isinstance(value, datetime.datetime):\n value = localtime(value)\n date = value.date()\n time = value.time()\n else:\n # value's just a list in case of an error\n date = value[0] if value else None\n time = value[1] if value else '00:00'\n\n return render_to_string('datetime_input.html', {\n 'date': self.widgets[0].render(\n name + '_0',\n date,\n date_attrs\n ),\n 'time': self.widgets[1].render(\n name + '_1',\n time,\n time_attrs\n )\n })\n", "path": "apps/contrib/widgets.py"}]}
1223
159
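A small sketch of the duplicate-id problem behind the diff above: both sub-widgets of the `SplitDateTimeWidget` previously built their attributes from the same `attrs` dict, so the rendered date and time inputs could share one HTML `id`; the patch derives a unique id from the field name for each. The `name` and `attrs` values here are invented for illustration.

```python
name, attrs = "event_start", {"id": "id_event_start"}  # hypothetical field

# Mirrors what the patched render() does for each sub-widget:
date_attrs = {**attrs, "class": "datepicker", "id": name + "_date"}
time_attrs = {**attrs, "class": "timepicker", "id": name + "_time"}

assert date_attrs["id"] != time_attrs["id"]
print(date_attrs["id"], time_attrs["id"])  # event_start_date event_start_time
```

Unique ids also let the datepicker/timepicker JavaScript bind each `<input>` unambiguously, which is presumably why the pickers in the screenshot collided.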
gh_patches_debug_17569
rasdani/github-patches
git_diff
python-discord__site-304
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make newlines visible in the deleted messages front-end The deleted messages front-end currently doesn't display newlines if they're consecutive, i.e., if the lines are otherwise empty (`"\n\n\n\n\n\n\n\n\n"`). This makes it difficult to interpret what actually happened in chat. A good solution to this would be to make newlines character visible using the `↵` character, printed in a light gray color (similar to how IDEs do it). **before** ![2019-10-27_18-30](https://user-images.githubusercontent.com/33516116/67638694-d53a1c80-f8e7-11e9-9971-d022a1d90a17.png) **proposed after** ![2019-10-27_18-29](https://user-images.githubusercontent.com/33516116/67638696-e08d4800-f8e7-11e9-8a7d-acf41e2b1f8a.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pydis_site/apps/staff/templatetags/deletedmessage_filters.py` Content: ``` 1 from datetime import datetime 2 3 from django import template 4 5 register = template.Library() 6 7 8 @register.filter 9 def hex_colour(color: int) -> str: 10 """Converts an integer representation of a colour to the RGB hex value.""" 11 return f"#{color:0>6X}" 12 13 14 @register.filter 15 def footer_datetime(timestamp: str) -> datetime: 16 """Takes an embed timestamp and returns a timezone-aware datetime object.""" 17 return datetime.fromisoformat(timestamp) 18 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pydis_site/apps/staff/templatetags/deletedmessage_filters.py b/pydis_site/apps/staff/templatetags/deletedmessage_filters.py --- a/pydis_site/apps/staff/templatetags/deletedmessage_filters.py +++ b/pydis_site/apps/staff/templatetags/deletedmessage_filters.py @@ -7,11 +7,22 @@ @register.filter def hex_colour(color: int) -> str: - """Converts an integer representation of a colour to the RGB hex value.""" - return f"#{color:0>6X}" + """ + Converts an integer representation of a colour to the RGB hex value. + + As we are using a Discord dark theme analogue, black colours are returned as white instead. + """ + colour = f"#{color:0>6X}" + return colour if colour != "#000000" else "#FFFFFF" @register.filter def footer_datetime(timestamp: str) -> datetime: """Takes an embed timestamp and returns a timezone-aware datetime object.""" return datetime.fromisoformat(timestamp) + + +@register.filter +def visible_newlines(text: str) -> str: + """Takes an embed timestamp and returns a timezone-aware datetime object.""" + return text.replace("\n", " <span class='has-text-grey'>↵</span><br>")
{"golden_diff": "diff --git a/pydis_site/apps/staff/templatetags/deletedmessage_filters.py b/pydis_site/apps/staff/templatetags/deletedmessage_filters.py\n--- a/pydis_site/apps/staff/templatetags/deletedmessage_filters.py\n+++ b/pydis_site/apps/staff/templatetags/deletedmessage_filters.py\n@@ -7,11 +7,22 @@\n \n @register.filter\n def hex_colour(color: int) -> str:\n- \"\"\"Converts an integer representation of a colour to the RGB hex value.\"\"\"\n- return f\"#{color:0>6X}\"\n+ \"\"\"\n+ Converts an integer representation of a colour to the RGB hex value.\n+\n+ As we are using a Discord dark theme analogue, black colours are returned as white instead.\n+ \"\"\"\n+ colour = f\"#{color:0>6X}\"\n+ return colour if colour != \"#000000\" else \"#FFFFFF\"\n \n \n @register.filter\n def footer_datetime(timestamp: str) -> datetime:\n \"\"\"Takes an embed timestamp and returns a timezone-aware datetime object.\"\"\"\n return datetime.fromisoformat(timestamp)\n+\n+\n+@register.filter\n+def visible_newlines(text: str) -> str:\n+ \"\"\"Takes an embed timestamp and returns a timezone-aware datetime object.\"\"\"\n+ return text.replace(\"\\n\", \" <span class='has-text-grey'>\u21b5</span><br>\")\n", "issue": "Make newlines visible in the deleted messages front-end\nThe deleted messages front-end currently doesn't display newlines if they're consecutive, i.e., if the lines are otherwise empty (`\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\"`). This makes it difficult to interpret what actually happened in chat. A good solution to this would be to make newlines character visible using the `\u21b5` character, printed in a light gray color (similar to how IDEs do it).\r\n\r\n**before**\r\n![2019-10-27_18-30](https://user-images.githubusercontent.com/33516116/67638694-d53a1c80-f8e7-11e9-9971-d022a1d90a17.png)\r\n\r\n**proposed after**\r\n![2019-10-27_18-29](https://user-images.githubusercontent.com/33516116/67638696-e08d4800-f8e7-11e9-8a7d-acf41e2b1f8a.png)\r\n\n", "before_files": [{"content": "from datetime import datetime\n\nfrom django import template\n\nregister = template.Library()\n\n\n@register.filter\ndef hex_colour(color: int) -> str:\n \"\"\"Converts an integer representation of a colour to the RGB hex value.\"\"\"\n return f\"#{color:0>6X}\"\n\n\n@register.filter\ndef footer_datetime(timestamp: str) -> datetime:\n \"\"\"Takes an embed timestamp and returns a timezone-aware datetime object.\"\"\"\n return datetime.fromisoformat(timestamp)\n", "path": "pydis_site/apps/staff/templatetags/deletedmessage_filters.py"}], "after_files": [{"content": "from datetime import datetime\n\nfrom django import template\n\nregister = template.Library()\n\n\n@register.filter\ndef hex_colour(color: int) -> str:\n \"\"\"\n Converts an integer representation of a colour to the RGB hex value.\n\n As we are using a Discord dark theme analogue, black colours are returned as white instead.\n \"\"\"\n colour = f\"#{color:0>6X}\"\n return colour if colour != \"#000000\" else \"#FFFFFF\"\n\n\n@register.filter\ndef footer_datetime(timestamp: str) -> datetime:\n \"\"\"Takes an embed timestamp and returns a timezone-aware datetime object.\"\"\"\n return datetime.fromisoformat(timestamp)\n\n\n@register.filter\ndef visible_newlines(text: str) -> str:\n \"\"\"Takes an embed timestamp and returns a timezone-aware datetime object.\"\"\"\n return text.replace(\"\\n\", \" <span class='has-text-grey'>\u21b5</span><br>\")\n", "path": "pydis_site/apps/staff/templatetags/deletedmessage_filters.py"}]}
659
304
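A standalone check of the two filters changed or added in the diff above, with the bodies lifted out of their `@register.filter` wrappers so they run without a Django template context:

```python
def hex_colour(color: int) -> str:
    colour = f"#{color:0>6X}"
    return colour if colour != "#000000" else "#FFFFFF"

def visible_newlines(text: str) -> str:
    return text.replace("\n", " <span class='has-text-grey'>&#8629;</span><br>")

print(hex_colour(0x000000))  # '#FFFFFF' -- pure black remapped for the dark theme
print(hex_colour(0x7289DA))  # '#7289DA'
print(visible_newlines("line one\n\nline two"))
# 'line one <span ...>...</span><br> <span ...>...</span><br>line two'
```

In an actual template the output of `visible_newlines` would still need to be marked safe (or the filter registered accordingly), since it emits raw HTML rather than escaped text. Note the sketch uses the `&#8629;` entity in place of the literal arrow character; the diff itself inserts the character directly.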
gh_patches_debug_18377
rasdani/github-patches
git_diff
open-mmlab__mmdetection-4300
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Empirical attention with ResNext backbone In mmdetectiom v1 I used ResNext backbone with empirical attention 0010_dcn but in v2 I got an error. Any ideas how to fix this? My updated config file: ``` _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( pretrained='open-mmlab://resnext50_32x4d', backbone=dict( type='ResNeXt', depth=50, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', plugins=[ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='0010', kv_stride=2), stages=(False, False, True, True), position='after_conv2') ], dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True))) ``` Error: ``` RuntimeError: Given groups=1, weight of size [256, 256, 1, 1], expected input[2, 512, 41, 73] to have 256 channels, but got 512 channels instead ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mmdet/models/backbones/resnext.py` Content: ``` 1 import math 2 3 from mmcv.cnn import build_conv_layer, build_norm_layer 4 5 from ..builder import BACKBONES 6 from ..utils import ResLayer 7 from .resnet import Bottleneck as _Bottleneck 8 from .resnet import ResNet 9 10 11 class Bottleneck(_Bottleneck): 12 expansion = 4 13 14 def __init__(self, 15 inplanes, 16 planes, 17 groups=1, 18 base_width=4, 19 base_channels=64, 20 **kwargs): 21 """Bottleneck block for ResNeXt. 22 23 If style is "pytorch", the stride-two layer is the 3x3 conv layer, if 24 it is "caffe", the stride-two layer is the first 1x1 conv layer. 25 """ 26 super(Bottleneck, self).__init__(inplanes, planes, **kwargs) 27 28 if groups == 1: 29 width = self.planes 30 else: 31 width = math.floor(self.planes * 32 (base_width / base_channels)) * groups 33 34 self.norm1_name, norm1 = build_norm_layer( 35 self.norm_cfg, width, postfix=1) 36 self.norm2_name, norm2 = build_norm_layer( 37 self.norm_cfg, width, postfix=2) 38 self.norm3_name, norm3 = build_norm_layer( 39 self.norm_cfg, self.planes * self.expansion, postfix=3) 40 41 self.conv1 = build_conv_layer( 42 self.conv_cfg, 43 self.inplanes, 44 width, 45 kernel_size=1, 46 stride=self.conv1_stride, 47 bias=False) 48 self.add_module(self.norm1_name, norm1) 49 fallback_on_stride = False 50 self.with_modulated_dcn = False 51 if self.with_dcn: 52 fallback_on_stride = self.dcn.pop('fallback_on_stride', False) 53 if not self.with_dcn or fallback_on_stride: 54 self.conv2 = build_conv_layer( 55 self.conv_cfg, 56 width, 57 width, 58 kernel_size=3, 59 stride=self.conv2_stride, 60 padding=self.dilation, 61 dilation=self.dilation, 62 groups=groups, 63 bias=False) 64 else: 65 assert self.conv_cfg is None, 'conv_cfg must be None for DCN' 66 self.conv2 = build_conv_layer( 67 self.dcn, 68 width, 69 width, 70 kernel_size=3, 71 stride=self.conv2_stride, 72 padding=self.dilation, 73 dilation=self.dilation, 74 groups=groups, 75 bias=False) 76 77 self.add_module(self.norm2_name, norm2) 78 self.conv3 = build_conv_layer( 79 self.conv_cfg, 80 width, 81 self.planes * self.expansion, 82 kernel_size=1, 83 bias=False) 84 self.add_module(self.norm3_name, norm3) 85 86 87 @BACKBONES.register_module() 88 class ResNeXt(ResNet): 89 """ResNeXt backbone. 90 91 Args: 92 depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. 
93 in_channels (int): Number of input image channels. Default: 3. 94 num_stages (int): Resnet stages. Default: 4. 95 groups (int): Group of resnext. 96 base_width (int): Base width of resnext. 97 strides (Sequence[int]): Strides of the first block of each stage. 98 dilations (Sequence[int]): Dilation of each stage. 99 out_indices (Sequence[int]): Output from which stages. 100 style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two 101 layer is the 3x3 conv layer, otherwise the stride-two layer is 102 the first 1x1 conv layer. 103 frozen_stages (int): Stages to be frozen (all param fixed). -1 means 104 not freezing any parameters. 105 norm_cfg (dict): dictionary to construct and config norm layer. 106 norm_eval (bool): Whether to set norm layers to eval mode, namely, 107 freeze running stats (mean and var). Note: Effect on Batch Norm 108 and its variants only. 109 with_cp (bool): Use checkpoint or not. Using checkpoint will save some 110 memory while slowing down the training speed. 111 zero_init_residual (bool): whether to use zero init for last norm layer 112 in resblocks to let them behave as identity. 113 """ 114 115 arch_settings = { 116 50: (Bottleneck, (3, 4, 6, 3)), 117 101: (Bottleneck, (3, 4, 23, 3)), 118 152: (Bottleneck, (3, 8, 36, 3)) 119 } 120 121 def __init__(self, groups=1, base_width=4, **kwargs): 122 self.groups = groups 123 self.base_width = base_width 124 super(ResNeXt, self).__init__(**kwargs) 125 126 def make_res_layer(self, **kwargs): 127 """Pack all blocks in a stage into a ``ResLayer``""" 128 return ResLayer( 129 groups=self.groups, 130 base_width=self.base_width, 131 base_channels=self.base_channels, 132 **kwargs) 133 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mmdet/models/backbones/resnext.py b/mmdet/models/backbones/resnext.py --- a/mmdet/models/backbones/resnext.py +++ b/mmdet/models/backbones/resnext.py @@ -83,6 +83,27 @@ bias=False) self.add_module(self.norm3_name, norm3) + if self.with_plugins: + self._del_block_plugins(self.after_conv1_plugin_names + + self.after_conv2_plugin_names + + self.after_conv3_plugin_names) + self.after_conv1_plugin_names = self.make_block_plugins( + width, self.after_conv1_plugins) + self.after_conv2_plugin_names = self.make_block_plugins( + width, self.after_conv2_plugins) + self.after_conv3_plugin_names = self.make_block_plugins( + self.planes * self.expansion, self.after_conv3_plugins) + + def _del_block_plugins(self, plugin_names): + """delete plugins for block if exist. + + Args: + plugin_names (list[str]): List of plugins name to delete. + """ + assert isinstance(plugin_names, list) + for plugin_name in plugin_names: + del self._modules[plugin_name] + @BACKBONES.register_module() class ResNeXt(ResNet):
{"golden_diff": "diff --git a/mmdet/models/backbones/resnext.py b/mmdet/models/backbones/resnext.py\n--- a/mmdet/models/backbones/resnext.py\n+++ b/mmdet/models/backbones/resnext.py\n@@ -83,6 +83,27 @@\n bias=False)\n self.add_module(self.norm3_name, norm3)\n \n+ if self.with_plugins:\n+ self._del_block_plugins(self.after_conv1_plugin_names +\n+ self.after_conv2_plugin_names +\n+ self.after_conv3_plugin_names)\n+ self.after_conv1_plugin_names = self.make_block_plugins(\n+ width, self.after_conv1_plugins)\n+ self.after_conv2_plugin_names = self.make_block_plugins(\n+ width, self.after_conv2_plugins)\n+ self.after_conv3_plugin_names = self.make_block_plugins(\n+ self.planes * self.expansion, self.after_conv3_plugins)\n+\n+ def _del_block_plugins(self, plugin_names):\n+ \"\"\"delete plugins for block if exist.\n+\n+ Args:\n+ plugin_names (list[str]): List of plugins name to delete.\n+ \"\"\"\n+ assert isinstance(plugin_names, list)\n+ for plugin_name in plugin_names:\n+ del self._modules[plugin_name]\n+\n \n @BACKBONES.register_module()\n class ResNeXt(ResNet):\n", "issue": "Empirical attention with ResNext backbone\nIn mmdetectiom v1 I used ResNext backbone with empirical attention 0010_dcn but in v2 I got an error.\r\nAny ideas how to fix this?\r\n\r\nMy updated config file:\r\n```\r\n_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\r\nmodel = dict(\r\n\r\n pretrained='open-mmlab://resnext50_32x4d',\r\n backbone=dict(\r\n type='ResNeXt',\r\n depth=50,\r\n groups=32,\r\n base_width=4,\r\n num_stages=4,\r\n out_indices=(0, 1, 2, 3),\r\n frozen_stages=1,\r\n norm_cfg=dict(type='BN', requires_grad=True),\r\n style='pytorch',\r\n plugins=[\r\n dict(\r\n cfg=dict(\r\n type='GeneralizedAttention',\r\n spatial_range=-1,\r\n num_heads=8,\r\n attention_type='0010',\r\n kv_stride=2),\r\n stages=(False, False, True, True),\r\n position='after_conv2')\r\n ],\r\n dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),\r\n stage_with_dcn=(False, True, True, True)))\r\n```\r\n\r\nError:\r\n```\r\nRuntimeError: Given groups=1, weight of size [256, 256, 1, 1], expected input[2, 512, 41, 73] to have 256 channels, but got 512 channels instead\r\n```\n", "before_files": [{"content": "import math\n\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\n\nfrom ..builder import BACKBONES\nfrom ..utils import ResLayer\nfrom .resnet import Bottleneck as _Bottleneck\nfrom .resnet import ResNet\n\n\nclass Bottleneck(_Bottleneck):\n expansion = 4\n\n def __init__(self,\n inplanes,\n planes,\n groups=1,\n base_width=4,\n base_channels=64,\n **kwargs):\n \"\"\"Bottleneck block for ResNeXt.\n\n If style is \"pytorch\", the stride-two layer is the 3x3 conv layer, if\n it is \"caffe\", the stride-two layer is the first 1x1 conv layer.\n \"\"\"\n super(Bottleneck, self).__init__(inplanes, planes, **kwargs)\n\n if groups == 1:\n width = self.planes\n else:\n width = math.floor(self.planes *\n (base_width / base_channels)) * groups\n\n self.norm1_name, norm1 = build_norm_layer(\n self.norm_cfg, width, postfix=1)\n self.norm2_name, norm2 = build_norm_layer(\n self.norm_cfg, width, postfix=2)\n self.norm3_name, norm3 = build_norm_layer(\n self.norm_cfg, self.planes * self.expansion, postfix=3)\n\n self.conv1 = build_conv_layer(\n self.conv_cfg,\n self.inplanes,\n width,\n kernel_size=1,\n stride=self.conv1_stride,\n bias=False)\n self.add_module(self.norm1_name, norm1)\n fallback_on_stride = False\n self.with_modulated_dcn = False\n if self.with_dcn:\n fallback_on_stride = self.dcn.pop('fallback_on_stride', 
False)\n if not self.with_dcn or fallback_on_stride:\n self.conv2 = build_conv_layer(\n self.conv_cfg,\n width,\n width,\n kernel_size=3,\n stride=self.conv2_stride,\n padding=self.dilation,\n dilation=self.dilation,\n groups=groups,\n bias=False)\n else:\n assert self.conv_cfg is None, 'conv_cfg must be None for DCN'\n self.conv2 = build_conv_layer(\n self.dcn,\n width,\n width,\n kernel_size=3,\n stride=self.conv2_stride,\n padding=self.dilation,\n dilation=self.dilation,\n groups=groups,\n bias=False)\n\n self.add_module(self.norm2_name, norm2)\n self.conv3 = build_conv_layer(\n self.conv_cfg,\n width,\n self.planes * self.expansion,\n kernel_size=1,\n bias=False)\n self.add_module(self.norm3_name, norm3)\n\n\n@BACKBONES.register_module()\nclass ResNeXt(ResNet):\n \"\"\"ResNeXt backbone.\n\n Args:\n depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n in_channels (int): Number of input image channels. Default: 3.\n num_stages (int): Resnet stages. Default: 4.\n groups (int): Group of resnext.\n base_width (int): Base width of resnext.\n strides (Sequence[int]): Strides of the first block of each stage.\n dilations (Sequence[int]): Dilation of each stage.\n out_indices (Sequence[int]): Output from which stages.\n style (str): `pytorch` or `caffe`. If set to \"pytorch\", the stride-two\n layer is the 3x3 conv layer, otherwise the stride-two layer is\n the first 1x1 conv layer.\n frozen_stages (int): Stages to be frozen (all param fixed). -1 means\n not freezing any parameters.\n norm_cfg (dict): dictionary to construct and config norm layer.\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed.\n zero_init_residual (bool): whether to use zero init for last norm layer\n in resblocks to let them behave as identity.\n \"\"\"\n\n arch_settings = {\n 50: (Bottleneck, (3, 4, 6, 3)),\n 101: (Bottleneck, (3, 4, 23, 3)),\n 152: (Bottleneck, (3, 8, 36, 3))\n }\n\n def __init__(self, groups=1, base_width=4, **kwargs):\n self.groups = groups\n self.base_width = base_width\n super(ResNeXt, self).__init__(**kwargs)\n\n def make_res_layer(self, **kwargs):\n \"\"\"Pack all blocks in a stage into a ``ResLayer``\"\"\"\n return ResLayer(\n groups=self.groups,\n base_width=self.base_width,\n base_channels=self.base_channels,\n **kwargs)\n", "path": "mmdet/models/backbones/resnext.py"}], "after_files": [{"content": "import math\n\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\n\nfrom ..builder import BACKBONES\nfrom ..utils import ResLayer\nfrom .resnet import Bottleneck as _Bottleneck\nfrom .resnet import ResNet\n\n\nclass Bottleneck(_Bottleneck):\n expansion = 4\n\n def __init__(self,\n inplanes,\n planes,\n groups=1,\n base_width=4,\n base_channels=64,\n **kwargs):\n \"\"\"Bottleneck block for ResNeXt.\n\n If style is \"pytorch\", the stride-two layer is the 3x3 conv layer, if\n it is \"caffe\", the stride-two layer is the first 1x1 conv layer.\n \"\"\"\n super(Bottleneck, self).__init__(inplanes, planes, **kwargs)\n\n if groups == 1:\n width = self.planes\n else:\n width = math.floor(self.planes *\n (base_width / base_channels)) * groups\n\n self.norm1_name, norm1 = build_norm_layer(\n self.norm_cfg, width, postfix=1)\n self.norm2_name, norm2 = build_norm_layer(\n self.norm_cfg, width, postfix=2)\n self.norm3_name, norm3 = build_norm_layer(\n self.norm_cfg, 
self.planes * self.expansion, postfix=3)\n\n self.conv1 = build_conv_layer(\n self.conv_cfg,\n self.inplanes,\n width,\n kernel_size=1,\n stride=self.conv1_stride,\n bias=False)\n self.add_module(self.norm1_name, norm1)\n fallback_on_stride = False\n self.with_modulated_dcn = False\n if self.with_dcn:\n fallback_on_stride = self.dcn.pop('fallback_on_stride', False)\n if not self.with_dcn or fallback_on_stride:\n self.conv2 = build_conv_layer(\n self.conv_cfg,\n width,\n width,\n kernel_size=3,\n stride=self.conv2_stride,\n padding=self.dilation,\n dilation=self.dilation,\n groups=groups,\n bias=False)\n else:\n assert self.conv_cfg is None, 'conv_cfg must be None for DCN'\n self.conv2 = build_conv_layer(\n self.dcn,\n width,\n width,\n kernel_size=3,\n stride=self.conv2_stride,\n padding=self.dilation,\n dilation=self.dilation,\n groups=groups,\n bias=False)\n\n self.add_module(self.norm2_name, norm2)\n self.conv3 = build_conv_layer(\n self.conv_cfg,\n width,\n self.planes * self.expansion,\n kernel_size=1,\n bias=False)\n self.add_module(self.norm3_name, norm3)\n\n if self.with_plugins:\n self._del_block_plugins(self.after_conv1_plugin_names +\n self.after_conv2_plugin_names +\n self.after_conv3_plugin_names)\n self.after_conv1_plugin_names = self.make_block_plugins(\n width, self.after_conv1_plugins)\n self.after_conv2_plugin_names = self.make_block_plugins(\n width, self.after_conv2_plugins)\n self.after_conv3_plugin_names = self.make_block_plugins(\n self.planes * self.expansion, self.after_conv3_plugins)\n\n def _del_block_plugins(self, plugin_names):\n \"\"\"delete plugins for block if exist.\n\n Args:\n plugin_names (list[str]): List of plugins name to delete.\n \"\"\"\n assert isinstance(plugin_names, list)\n for plugin_name in plugin_names:\n del self._modules[plugin_name]\n\n\n@BACKBONES.register_module()\nclass ResNeXt(ResNet):\n \"\"\"ResNeXt backbone.\n\n Args:\n depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n in_channels (int): Number of input image channels. Default: 3.\n num_stages (int): Resnet stages. Default: 4.\n groups (int): Group of resnext.\n base_width (int): Base width of resnext.\n strides (Sequence[int]): Strides of the first block of each stage.\n dilations (Sequence[int]): Dilation of each stage.\n out_indices (Sequence[int]): Output from which stages.\n style (str): `pytorch` or `caffe`. If set to \"pytorch\", the stride-two\n layer is the 3x3 conv layer, otherwise the stride-two layer is\n the first 1x1 conv layer.\n frozen_stages (int): Stages to be frozen (all param fixed). -1 means\n not freezing any parameters.\n norm_cfg (dict): dictionary to construct and config norm layer.\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only.\n with_cp (bool): Use checkpoint or not. 
Using checkpoint will save some\n memory while slowing down the training speed.\n zero_init_residual (bool): whether to use zero init for last norm layer\n in resblocks to let them behave as identity.\n \"\"\"\n\n arch_settings = {\n 50: (Bottleneck, (3, 4, 6, 3)),\n 101: (Bottleneck, (3, 4, 23, 3)),\n 152: (Bottleneck, (3, 8, 36, 3))\n }\n\n def __init__(self, groups=1, base_width=4, **kwargs):\n self.groups = groups\n self.base_width = base_width\n super(ResNeXt, self).__init__(**kwargs)\n\n def make_res_layer(self, **kwargs):\n \"\"\"Pack all blocks in a stage into a ``ResLayer``\"\"\"\n return ResLayer(\n groups=self.groups,\n base_width=self.base_width,\n base_channels=self.base_channels,\n **kwargs)\n", "path": "mmdet/models/backbones/resnext.py"}]}
2024

287
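The channel mismatch in the issue's traceback can be reproduced numerically: in the ResNeXt bottleneck the 3x3 conv outputs `width` channels rather than `planes`, so plugins built by the parent `Bottleneck` for `planes` channels (which the patch deletes and rebuilds) see the wrong input size. A quick check using the groups/base_width from the issue's config:

```python
import math

groups, base_width, base_channels = 32, 4, 64  # from the issue's ResNeXt-50 config

for planes in (64, 128, 256, 512):  # per-stage bottleneck planes
    width = math.floor(planes * (base_width / base_channels)) * groups
    print(planes, "->", width)
# 64 -> 128, 128 -> 256, 256 -> 512, 512 -> 1024
# The 256 -> 512 row matches "expected input ... to have 256 channels, but got
# 512 channels instead": the plugin expected `planes`, the conv produced `width`.
```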
gh_patches_debug_1191
rasdani/github-patches
git_diff
huggingface__dataset-viewer-2409
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Retry jobs that finish with `ClientConnection` error? Maybe here: https://github.com/huggingface/datasets-server/blob/f311a9212aaa91dd0373e5c2d4f5da9b6bdabcb5/chart/env/prod.yaml#L209 Internal conversation on Slack: https://huggingface.slack.com/archives/C0311GZ7R6K/p1698224875005729 Anyway: I'm wondering if we can have the error now that the dataset scripts are disabled by default. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libs/libcommon/src/libcommon/constants.py` Content: ``` 1 # SPDX-License-Identifier: Apache-2.0 2 # Copyright 2022 The HuggingFace Authors. 3 4 CACHE_COLLECTION_RESPONSES = "cachedResponsesBlue" 5 CACHE_MONGOENGINE_ALIAS = "cache" 6 HF_DATASETS_CACHE_APPNAME = "hf_datasets_cache" 7 PARQUET_METADATA_CACHE_APPNAME = "datasets_server_parquet_metadata" 8 DESCRIPTIVE_STATISTICS_CACHE_APPNAME = "datasets_server_descriptive_statistics" 9 DUCKDB_INDEX_CACHE_APPNAME = "datasets_server_duckdb_index" 10 DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY = "downloads" 11 DUCKDB_INDEX_JOB_RUNNER_SUBDIRECTORY = "job_runner" 12 CACHE_METRICS_COLLECTION = "cacheTotalMetric" 13 QUEUE_METRICS_COLLECTION = "jobTotalMetric" 14 METRICS_MONGOENGINE_ALIAS = "metrics" 15 QUEUE_COLLECTION_JOBS = "jobsBlue" 16 QUEUE_COLLECTION_LOCKS = "locks" 17 QUEUE_MONGOENGINE_ALIAS = "queue" 18 QUEUE_TTL_SECONDS = 600 # 10 minutes 19 LOCK_TTL_SECONDS_NO_OWNER = 600 # 10 minutes 20 LOCK_TTL_SECONDS_TO_START_JOB = 600 # 10 minutes 21 LOCK_TTL_SECONDS_TO_WRITE_ON_GIT_BRANCH = 3600 # 1 hour 22 23 MAX_FAILED_RUNS = 3 24 DATASET_SEPARATOR = "--" 25 DEFAULT_DIFFICULTY = 50 26 DEFAULT_DIFFICULTY_MAX = 100 27 DEFAULT_DIFFICULTY_MIN = 0 28 DEFAULT_INPUT_TYPE = "dataset" 29 DEFAULT_JOB_RUNNER_VERSION = 1 30 DIFFICULTY_BONUS_BY_FAILED_RUNS = 20 31 MIN_BYTES_FOR_BONUS_DIFFICULTY = 3_000_000_000 32 33 PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100 34 PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100 35 PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100 36 PARQUET_REVISION = "refs/convert/parquet" 37 38 ERROR_CODES_TO_RETRY = { 39 "CreateCommitError", 40 "ExternalServerError", 41 "JobManagerCrashedError", 42 "LockedDatasetTimeoutError", 43 "StreamingRowsError", 44 } 45 46 EXTERNAL_DATASET_SCRIPT_PATTERN = "datasets_modules/datasets" 47 48 # Arrays are not immutable, we have to take care of not modifying them 49 # Anyway: in all this file, we allow constant reassignment (no use of Final) 50 CONFIG_HAS_VIEWER_KINDS = ["config-size"] 51 CONFIG_INFO_KINDS = ["config-info"] 52 CONFIG_PARQUET_METADATA_KINDS = ["config-parquet-metadata"] 53 CONFIG_PARQUET_AND_METADATA_KINDS = ["config-parquet", "config-parquet-metadata"] 54 CONFIG_SPLIT_NAMES_KINDS = ["config-split-names-from-info", "config-split-names-from-streaming"] 55 DATASET_CONFIG_NAMES_KINDS = ["dataset-config-names"] 56 DATASET_INFO_KINDS = ["dataset-info"] 57 SPLIT_DUCKDB_INDEX_KINDS = ["split-duckdb-index"] 58 SPLIT_HAS_PREVIEW_KINDS = ["split-first-rows-from-streaming", "split-first-rows-from-parquet"] 59 SPLIT_HAS_SEARCH_KINDS = ["split-duckdb-index"] 60 PARALLEL_STEPS_LISTS = [ 61 CONFIG_SPLIT_NAMES_KINDS, 62 SPLIT_HAS_PREVIEW_KINDS, 63 ] 64 65 CROISSANT_MAX_CONFIGS = 100 66 MAX_NUM_ROWS_PER_PAGE = 100 67 ``` --- END FILES --- Please first localize the bug based on the issue 
statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libs/libcommon/src/libcommon/constants.py b/libs/libcommon/src/libcommon/constants.py --- a/libs/libcommon/src/libcommon/constants.py +++ b/libs/libcommon/src/libcommon/constants.py @@ -36,6 +36,7 @@ PARQUET_REVISION = "refs/convert/parquet" ERROR_CODES_TO_RETRY = { + "ConnectionError", "CreateCommitError", "ExternalServerError", "JobManagerCrashedError",
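For illustration, here is a minimal sketch of how a retry set such as `ERROR_CODES_TO_RETRY` is typically consulted when a job finishes. The `should_retry` helper is hypothetical, not actual datasets-server orchestration code; only the imported constants come from the file above.

```python
# Hypothetical helper (not actual datasets-server code): retry only
# transient error codes, and only a bounded number of times.
from libcommon.constants import ERROR_CODES_TO_RETRY, MAX_FAILED_RUNS


def should_retry(error_code: str, failed_runs: int) -> bool:
    return error_code in ERROR_CODES_TO_RETRY and failed_runs < MAX_FAILED_RUNS


# With the patch applied, a job that died on a dropped connection is retried:
assert should_retry("ConnectionError", failed_runs=0)
# while a job that has already exhausted its budget is not:
assert not should_retry("ConnectionError", failed_runs=MAX_FAILED_RUNS)
```

Bounding retries with `MAX_FAILED_RUNS` matters here: a dataset whose connections always drop would otherwise be requeued forever.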
{"golden_diff": "diff --git a/libs/libcommon/src/libcommon/constants.py b/libs/libcommon/src/libcommon/constants.py\n--- a/libs/libcommon/src/libcommon/constants.py\n+++ b/libs/libcommon/src/libcommon/constants.py\n@@ -36,6 +36,7 @@\n PARQUET_REVISION = \"refs/convert/parquet\"\n \n ERROR_CODES_TO_RETRY = {\n+ \"ConnectionError\",\n \"CreateCommitError\",\n \"ExternalServerError\",\n \"JobManagerCrashedError\",\n", "issue": "Retry jobs that finish with `ClientConnection` error?\nMaybe here: https://github.com/huggingface/datasets-server/blob/f311a9212aaa91dd0373e5c2d4f5da9b6bdabcb5/chart/env/prod.yaml#L209\r\n\r\nInternal conversation on Slack: https://huggingface.slack.com/archives/C0311GZ7R6K/p1698224875005729\r\n\r\nAnyway: I'm wondering if we can have the error now that the dataset scripts are disabled by default.\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\n\nCACHE_COLLECTION_RESPONSES = \"cachedResponsesBlue\"\nCACHE_MONGOENGINE_ALIAS = \"cache\"\nHF_DATASETS_CACHE_APPNAME = \"hf_datasets_cache\"\nPARQUET_METADATA_CACHE_APPNAME = \"datasets_server_parquet_metadata\"\nDESCRIPTIVE_STATISTICS_CACHE_APPNAME = \"datasets_server_descriptive_statistics\"\nDUCKDB_INDEX_CACHE_APPNAME = \"datasets_server_duckdb_index\"\nDUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY = \"downloads\"\nDUCKDB_INDEX_JOB_RUNNER_SUBDIRECTORY = \"job_runner\"\nCACHE_METRICS_COLLECTION = \"cacheTotalMetric\"\nQUEUE_METRICS_COLLECTION = \"jobTotalMetric\"\nMETRICS_MONGOENGINE_ALIAS = \"metrics\"\nQUEUE_COLLECTION_JOBS = \"jobsBlue\"\nQUEUE_COLLECTION_LOCKS = \"locks\"\nQUEUE_MONGOENGINE_ALIAS = \"queue\"\nQUEUE_TTL_SECONDS = 600 # 10 minutes\nLOCK_TTL_SECONDS_NO_OWNER = 600 # 10 minutes\nLOCK_TTL_SECONDS_TO_START_JOB = 600 # 10 minutes\nLOCK_TTL_SECONDS_TO_WRITE_ON_GIT_BRANCH = 3600 # 1 hour\n\nMAX_FAILED_RUNS = 3\nDATASET_SEPARATOR = \"--\"\nDEFAULT_DIFFICULTY = 50\nDEFAULT_DIFFICULTY_MAX = 100\nDEFAULT_DIFFICULTY_MIN = 0\nDEFAULT_INPUT_TYPE = \"dataset\"\nDEFAULT_JOB_RUNNER_VERSION = 1\nDIFFICULTY_BONUS_BY_FAILED_RUNS = 20\nMIN_BYTES_FOR_BONUS_DIFFICULTY = 3_000_000_000\n\nPROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100\nPROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100\nPROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100\nPARQUET_REVISION = \"refs/convert/parquet\"\n\nERROR_CODES_TO_RETRY = {\n \"CreateCommitError\",\n \"ExternalServerError\",\n \"JobManagerCrashedError\",\n \"LockedDatasetTimeoutError\",\n \"StreamingRowsError\",\n}\n\nEXTERNAL_DATASET_SCRIPT_PATTERN = \"datasets_modules/datasets\"\n\n# Arrays are not immutable, we have to take care of not modifying them\n# Anyway: in all this file, we allow constant reassignment (no use of Final)\nCONFIG_HAS_VIEWER_KINDS = [\"config-size\"]\nCONFIG_INFO_KINDS = [\"config-info\"]\nCONFIG_PARQUET_METADATA_KINDS = [\"config-parquet-metadata\"]\nCONFIG_PARQUET_AND_METADATA_KINDS = [\"config-parquet\", \"config-parquet-metadata\"]\nCONFIG_SPLIT_NAMES_KINDS = [\"config-split-names-from-info\", \"config-split-names-from-streaming\"]\nDATASET_CONFIG_NAMES_KINDS = [\"dataset-config-names\"]\nDATASET_INFO_KINDS = [\"dataset-info\"]\nSPLIT_DUCKDB_INDEX_KINDS = [\"split-duckdb-index\"]\nSPLIT_HAS_PREVIEW_KINDS = [\"split-first-rows-from-streaming\", \"split-first-rows-from-parquet\"]\nSPLIT_HAS_SEARCH_KINDS = [\"split-duckdb-index\"]\nPARALLEL_STEPS_LISTS = [\n CONFIG_SPLIT_NAMES_KINDS,\n 
SPLIT_HAS_PREVIEW_KINDS,\n]\n\nCROISSANT_MAX_CONFIGS = 100\nMAX_NUM_ROWS_PER_PAGE = 100\n", "path": "libs/libcommon/src/libcommon/constants.py"}], "after_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\n\nCACHE_COLLECTION_RESPONSES = \"cachedResponsesBlue\"\nCACHE_MONGOENGINE_ALIAS = \"cache\"\nHF_DATASETS_CACHE_APPNAME = \"hf_datasets_cache\"\nPARQUET_METADATA_CACHE_APPNAME = \"datasets_server_parquet_metadata\"\nDESCRIPTIVE_STATISTICS_CACHE_APPNAME = \"datasets_server_descriptive_statistics\"\nDUCKDB_INDEX_CACHE_APPNAME = \"datasets_server_duckdb_index\"\nDUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY = \"downloads\"\nDUCKDB_INDEX_JOB_RUNNER_SUBDIRECTORY = \"job_runner\"\nCACHE_METRICS_COLLECTION = \"cacheTotalMetric\"\nQUEUE_METRICS_COLLECTION = \"jobTotalMetric\"\nMETRICS_MONGOENGINE_ALIAS = \"metrics\"\nQUEUE_COLLECTION_JOBS = \"jobsBlue\"\nQUEUE_COLLECTION_LOCKS = \"locks\"\nQUEUE_MONGOENGINE_ALIAS = \"queue\"\nQUEUE_TTL_SECONDS = 600 # 10 minutes\nLOCK_TTL_SECONDS_NO_OWNER = 600 # 10 minutes\nLOCK_TTL_SECONDS_TO_START_JOB = 600 # 10 minutes\nLOCK_TTL_SECONDS_TO_WRITE_ON_GIT_BRANCH = 3600 # 1 hour\n\nMAX_FAILED_RUNS = 3\nDATASET_SEPARATOR = \"--\"\nDEFAULT_DIFFICULTY = 50\nDEFAULT_DIFFICULTY_MAX = 100\nDEFAULT_DIFFICULTY_MIN = 0\nDEFAULT_INPUT_TYPE = \"dataset\"\nDEFAULT_JOB_RUNNER_VERSION = 1\nDIFFICULTY_BONUS_BY_FAILED_RUNS = 20\nMIN_BYTES_FOR_BONUS_DIFFICULTY = 3_000_000_000\n\nPROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100\nPROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100\nPROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100\nPARQUET_REVISION = \"refs/convert/parquet\"\n\nERROR_CODES_TO_RETRY = {\n \"ConnectionError\",\n \"CreateCommitError\",\n \"ExternalServerError\",\n \"JobManagerCrashedError\",\n \"LockedDatasetTimeoutError\",\n \"StreamingRowsError\",\n}\n\nEXTERNAL_DATASET_SCRIPT_PATTERN = \"datasets_modules/datasets\"\n\n# Arrays are not immutable, we have to take care of not modifying them\n# Anyway: in all this file, we allow constant reassignment (no use of Final)\nCONFIG_HAS_VIEWER_KINDS = [\"config-size\"]\nCONFIG_INFO_KINDS = [\"config-info\"]\nCONFIG_PARQUET_METADATA_KINDS = [\"config-parquet-metadata\"]\nCONFIG_PARQUET_AND_METADATA_KINDS = [\"config-parquet\", \"config-parquet-metadata\"]\nCONFIG_SPLIT_NAMES_KINDS = [\"config-split-names-from-info\", \"config-split-names-from-streaming\"]\nDATASET_CONFIG_NAMES_KINDS = [\"dataset-config-names\"]\nDATASET_INFO_KINDS = [\"dataset-info\"]\nSPLIT_DUCKDB_INDEX_KINDS = [\"split-duckdb-index\"]\nSPLIT_HAS_PREVIEW_KINDS = [\"split-first-rows-from-streaming\", \"split-first-rows-from-parquet\"]\nSPLIT_HAS_SEARCH_KINDS = [\"split-duckdb-index\"]\nPARALLEL_STEPS_LISTS = [\n CONFIG_SPLIT_NAMES_KINDS,\n SPLIT_HAS_PREVIEW_KINDS,\n]\n\nMAX_NUM_ROWS_PER_PAGE = 100\n", "path": "libs/libcommon/src/libcommon/constants.py"}]}
1,260
102
gh_patches_debug_10313
rasdani/github-patches
git_diff
nilearn__nilearn-4049
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [DOC] Fix Sphinx issues related to adding new experimental module and typing ### Is there an existing issue for this? - [X] I have searched the existing issues ### Describe your proposed suggestion in detail. 1. Linkcode extension error https://github.com/nilearn/nilearn/pull/4017#issuecomment-1739213117 and related workaround: https://github.com/nilearn/nilearn/blob/9133bd98ddc085e14d545fd0439be621c8679f91/doc/sphinxext/github_link.py#L43-L44 2. See Sphinx warning https://github.com/nilearn/nilearn/pull/4017#issuecomment-1739630179 and related hotfix of remving sphinx role here: https://github.com/nilearn/nilearn/blob/9133bd98ddc085e14d545fd0439be621c8679f91/doc/modules/experimental.rst?plain=1#L10 3. Sphinx autodoc has an issue with resolving external libraries like numpy when using typehints. It seems to be a bug similar to what is reported here https://github.com/sphinx-doc/sphinx/issues/10785. For now this autodoc argument is added to hide the typehints from the integrated typing https://github.com/nilearn/nilearn/blob/9133bd98ddc085e14d545fd0439be621c8679f91/doc/conf.py#L57 And if we have both docstrings and the typehints we have double rendering in the docs of the description of a e.g. class and this is redundant and looks ugly so we should also discuss how we want to approach adding docstrings + typing and how to configure what will be displayed. I'll post an example once the dev docs are built ### List any pages that would be impacted. _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `doc/sphinxext/github_link.py` Content: ``` 1 import inspect 2 import os 3 import subprocess 4 import sys 5 from functools import partial 6 from operator import attrgetter 7 8 REVISION_CMD = "git rev-parse --short HEAD" 9 10 11 def _get_git_revision(): 12 try: 13 revision = subprocess.check_output(REVISION_CMD.split()).strip() 14 except (subprocess.CalledProcessError, OSError): 15 print("Failed to execute git to get revision") 16 return None 17 return revision.decode("utf-8") 18 19 20 def _linkcode_resolve(domain, info, package, url_fmt, revision): 21 """Determine a link to online source for a class/method/function. 22 23 This is called by sphinx.ext.linkcode 24 25 An example with a long-untouched module that everyone has 26 >>> _linkcode_resolve('py', {'module': 'tty', 27 ... 'fullname': 'setraw'}, 28 ... package='tty', 29 ... url_fmt='http://hg.python.org/cpython/file/' 30 ... '{revision}/Lib/{package}/{path}#L{lineno}', 31 ... 
revision='xxxx') 32 'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18' 33 """ 34 if revision is None: 35 return 36 if domain not in ("py", "pyx"): 37 return 38 if not info.get("module") or not info.get("fullname"): 39 return 40 41 class_name = info["fullname"].split(".")[0] 42 module = __import__(info["module"], fromlist=[class_name]) 43 if info["module"] == "nilearn.experimental.surface": 44 return 45 obj = attrgetter(info["fullname"])(module) 46 47 # Unwrap the object to get the correct source 48 # file in case that is wrapped by a decorator 49 obj = inspect.unwrap(obj) 50 51 try: 52 fn = inspect.getsourcefile(obj) 53 except Exception: 54 fn = None 55 if not fn: 56 try: 57 fn = inspect.getsourcefile(sys.modules[obj.__module__]) 58 except Exception: 59 fn = None 60 if not fn: 61 return 62 63 # Don't include filenames from outside this package's tree 64 if os.path.dirname(__import__(package).__file__) not in fn: 65 return 66 67 fn = os.path.relpath( 68 fn, start=os.path.dirname(__import__(package).__file__) 69 ) 70 try: 71 lineno = inspect.getsourcelines(obj)[1] 72 except Exception: 73 lineno = "" 74 return url_fmt.format( 75 revision=revision, package=package, path=fn, lineno=lineno 76 ) 77 78 79 def make_linkcode_resolve(package, url_fmt): 80 """Return a linkcode_resolve function for the given URL format. 81 82 revision is a git commit reference (hash or name) 83 84 package is the name of the root module of the package 85 86 url_fmt is along the lines of ('https://github.com/USER/PROJECT/' 87 'blob/{revision}/{package}/' 88 '{path}#L{lineno}') 89 """ 90 revision = _get_git_revision() 91 return partial( 92 _linkcode_resolve, revision=revision, package=package, url_fmt=url_fmt 93 ) 94 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/doc/sphinxext/github_link.py b/doc/sphinxext/github_link.py --- a/doc/sphinxext/github_link.py +++ b/doc/sphinxext/github_link.py @@ -40,9 +40,12 @@ class_name = info["fullname"].split(".")[0] module = __import__(info["module"], fromlist=[class_name]) - if info["module"] == "nilearn.experimental.surface": + # For typed parameters, this will try to get uninitialized attributes + # and fail + try: + obj = attrgetter(info["fullname"])(module) + except AttributeError: return - obj = attrgetter(info["fullname"])(module) # Unwrap the object to get the correct source # file in case that is wrapped by a decorator
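The new `AttributeError` branch exists because a class-level type annotation without an assignment does not create a real attribute, so `attrgetter` fails on annotated-only names. A minimal, self-contained reproduction (the `Surface` class is an invented stand-in for the annotated classes in `nilearn.experimental.surface`):

```python
# Why the try/except is needed: an annotation alone creates no attribute,
# so the linkcode resolver's attrgetter lookup raises AttributeError.
import sys
from operator import attrgetter


class Surface:    # invented stand-in for an annotated nilearn class
    name: str     # annotation only; Surface.name does not actually exist


module = sys.modules[__name__]
try:
    attrgetter("Surface.name")(module)
except AttributeError as exc:
    print(f"caught, as the patched resolver now does: {exc}")
```

This also shows why the old module-name special case was too narrow: any typed, uninitialized attribute elsewhere in the package could trigger the same failure.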
{"golden_diff": "diff --git a/doc/sphinxext/github_link.py b/doc/sphinxext/github_link.py\n--- a/doc/sphinxext/github_link.py\n+++ b/doc/sphinxext/github_link.py\n@@ -40,9 +40,12 @@\n \n class_name = info[\"fullname\"].split(\".\")[0]\n module = __import__(info[\"module\"], fromlist=[class_name])\n- if info[\"module\"] == \"nilearn.experimental.surface\":\n+ # For typed parameters, this will try to get uninitialized attributes\n+ # and fail\n+ try:\n+ obj = attrgetter(info[\"fullname\"])(module)\n+ except AttributeError:\n return\n- obj = attrgetter(info[\"fullname\"])(module)\n \n # Unwrap the object to get the correct source\n # file in case that is wrapped by a decorator\n", "issue": "[DOC] Fix Sphinx issues related to adding new experimental module and typing\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues\r\n\r\n### Describe your proposed suggestion in detail.\r\n\r\n1. Linkcode extension error https://github.com/nilearn/nilearn/pull/4017#issuecomment-1739213117 and related workaround: https://github.com/nilearn/nilearn/blob/9133bd98ddc085e14d545fd0439be621c8679f91/doc/sphinxext/github_link.py#L43-L44\r\n\r\n2. See Sphinx warning https://github.com/nilearn/nilearn/pull/4017#issuecomment-1739630179 and related hotfix of remving sphinx role here: https://github.com/nilearn/nilearn/blob/9133bd98ddc085e14d545fd0439be621c8679f91/doc/modules/experimental.rst?plain=1#L10\r\n\r\n3. Sphinx autodoc has an issue with resolving external libraries like numpy when using typehints. It seems to be a bug similar to what is reported here https://github.com/sphinx-doc/sphinx/issues/10785. For now this autodoc argument is added to hide the typehints from the integrated typing https://github.com/nilearn/nilearn/blob/9133bd98ddc085e14d545fd0439be621c8679f91/doc/conf.py#L57\r\nAnd if we have both docstrings and the typehints we have double rendering in the docs of the description of a e.g. class and this is redundant and looks ugly so we should also discuss how we want to approach adding docstrings + typing and how to configure what will be displayed. I'll post an example once the dev docs are built\r\n\r\n### List any pages that would be impacted.\r\n\r\n_No response_\n", "before_files": [{"content": "import inspect\nimport os\nimport subprocess\nimport sys\nfrom functools import partial\nfrom operator import attrgetter\n\nREVISION_CMD = \"git rev-parse --short HEAD\"\n\n\ndef _get_git_revision():\n try:\n revision = subprocess.check_output(REVISION_CMD.split()).strip()\n except (subprocess.CalledProcessError, OSError):\n print(\"Failed to execute git to get revision\")\n return None\n return revision.decode(\"utf-8\")\n\n\ndef _linkcode_resolve(domain, info, package, url_fmt, revision):\n \"\"\"Determine a link to online source for a class/method/function.\n\n This is called by sphinx.ext.linkcode\n\n An example with a long-untouched module that everyone has\n >>> _linkcode_resolve('py', {'module': 'tty',\n ... 'fullname': 'setraw'},\n ... package='tty',\n ... url_fmt='http://hg.python.org/cpython/file/'\n ... '{revision}/Lib/{package}/{path}#L{lineno}',\n ... 
revision='xxxx')\n 'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'\n \"\"\"\n if revision is None:\n return\n if domain not in (\"py\", \"pyx\"):\n return\n if not info.get(\"module\") or not info.get(\"fullname\"):\n return\n\n class_name = info[\"fullname\"].split(\".\")[0]\n module = __import__(info[\"module\"], fromlist=[class_name])\n if info[\"module\"] == \"nilearn.experimental.surface\":\n return\n obj = attrgetter(info[\"fullname\"])(module)\n\n # Unwrap the object to get the correct source\n # file in case that is wrapped by a decorator\n obj = inspect.unwrap(obj)\n\n try:\n fn = inspect.getsourcefile(obj)\n except Exception:\n fn = None\n if not fn:\n try:\n fn = inspect.getsourcefile(sys.modules[obj.__module__])\n except Exception:\n fn = None\n if not fn:\n return\n\n # Don't include filenames from outside this package's tree\n if os.path.dirname(__import__(package).__file__) not in fn:\n return\n\n fn = os.path.relpath(\n fn, start=os.path.dirname(__import__(package).__file__)\n )\n try:\n lineno = inspect.getsourcelines(obj)[1]\n except Exception:\n lineno = \"\"\n return url_fmt.format(\n revision=revision, package=package, path=fn, lineno=lineno\n )\n\n\ndef make_linkcode_resolve(package, url_fmt):\n \"\"\"Return a linkcode_resolve function for the given URL format.\n\n revision is a git commit reference (hash or name)\n\n package is the name of the root module of the package\n\n url_fmt is along the lines of ('https://github.com/USER/PROJECT/'\n 'blob/{revision}/{package}/'\n '{path}#L{lineno}')\n \"\"\"\n revision = _get_git_revision()\n return partial(\n _linkcode_resolve, revision=revision, package=package, url_fmt=url_fmt\n )\n", "path": "doc/sphinxext/github_link.py"}], "after_files": [{"content": "import inspect\nimport os\nimport subprocess\nimport sys\nfrom functools import partial\nfrom operator import attrgetter\n\nREVISION_CMD = \"git rev-parse --short HEAD\"\n\n\ndef _get_git_revision():\n try:\n revision = subprocess.check_output(REVISION_CMD.split()).strip()\n except (subprocess.CalledProcessError, OSError):\n print(\"Failed to execute git to get revision\")\n return None\n return revision.decode(\"utf-8\")\n\n\ndef _linkcode_resolve(domain, info, package, url_fmt, revision):\n \"\"\"Determine a link to online source for a class/method/function.\n\n This is called by sphinx.ext.linkcode\n\n An example with a long-untouched module that everyone has\n >>> _linkcode_resolve('py', {'module': 'tty',\n ... 'fullname': 'setraw'},\n ... package='tty',\n ... url_fmt='http://hg.python.org/cpython/file/'\n ... '{revision}/Lib/{package}/{path}#L{lineno}',\n ... 
revision='xxxx')\n 'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'\n \"\"\"\n if revision is None:\n return\n if domain not in (\"py\", \"pyx\"):\n return\n if not info.get(\"module\") or not info.get(\"fullname\"):\n return\n\n class_name = info[\"fullname\"].split(\".\")[0]\n module = __import__(info[\"module\"], fromlist=[class_name])\n # For typed parameters, this will try to get uninitialized attributes\n # and fail\n try:\n obj = attrgetter(info[\"fullname\"])(module)\n except AttributeError:\n return\n\n # Unwrap the object to get the correct source\n # file in case that is wrapped by a decorator\n obj = inspect.unwrap(obj)\n\n try:\n fn = inspect.getsourcefile(obj)\n except Exception:\n fn = None\n if not fn:\n try:\n fn = inspect.getsourcefile(sys.modules[obj.__module__])\n except Exception:\n fn = None\n if not fn:\n return\n\n # Don't include filenames from outside this package's tree\n if os.path.dirname(__import__(package).__file__) not in fn:\n return\n\n fn = os.path.relpath(\n fn, start=os.path.dirname(__import__(package).__file__)\n )\n try:\n lineno = inspect.getsourcelines(obj)[1]\n except Exception:\n lineno = \"\"\n return url_fmt.format(\n revision=revision, package=package, path=fn, lineno=lineno\n )\n\n\ndef make_linkcode_resolve(package, url_fmt):\n \"\"\"Return a linkcode_resolve function for the given URL format.\n\n revision is a git commit reference (hash or name)\n\n package is the name of the root module of the package\n\n url_fmt is along the lines of ('https://github.com/USER/PROJECT/'\n 'blob/{revision}/{package}/'\n '{path}#L{lineno}')\n \"\"\"\n revision = _get_git_revision()\n return partial(\n _linkcode_resolve, revision=revision, package=package, url_fmt=url_fmt\n )\n", "path": "doc/sphinxext/github_link.py"}]}
1,580
182
gh_patches_debug_14871
rasdani/github-patches
git_diff
liqd__a4-meinberlin-2610
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- preview in text commenting does not work URL: https://meinberlin-dev.liqd.net/dashboard/projects/textkommentierung/basic/ user: initiator expected behaviour: should be able to see preview behaviour: get 405 error Comment/Question: I checked brainstorming and it works. After I enter content for the document it also works, so I guess it has to do with the module text commenting. We also have the problem on prod, so I guess we need a hot fix. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `meinberlin/apps/documents/views.py` Content: ``` 1 from django.http import Http404 2 from django.urls import reverse 3 from django.utils.functional import cached_property 4 from django.utils.translation import ugettext_lazy as _ 5 from django.views import generic 6 7 from adhocracy4.dashboard import mixins as dashboard_mixins 8 from adhocracy4.projects.mixins import DisplayProjectOrModuleMixin 9 from adhocracy4.projects.mixins import ProjectMixin 10 from adhocracy4.rules import mixins as rules_mixins 11 from meinberlin.apps.exports.views import DashboardExportView 12 13 from . import models 14 15 16 class DocumentDashboardView(ProjectMixin, 17 dashboard_mixins.DashboardBaseMixin, 18 dashboard_mixins.DashboardComponentMixin, 19 generic.TemplateView): 20 template_name = 'meinberlin_documents/document_dashboard.html' 21 permission_required = 'a4projects.change_project' 22 23 def get_permission_object(self): 24 return self.project 25 26 27 class ChapterDetailView(ProjectMixin, 28 rules_mixins.PermissionRequiredMixin, 29 generic.DetailView, 30 DisplayProjectOrModuleMixin): 31 model = models.Chapter 32 permission_required = 'meinberlin_documents.view_chapter' 33 get_context_from_object = True 34 35 def get_context_data(self, **kwargs): 36 context = super().get_context_data(**kwargs) 37 context['chapter_list'] = self.chapter_list 38 return context 39 40 @property 41 def chapter_list(self): 42 return models.Chapter.objects.filter(module=self.module) 43 44 @cached_property 45 def extends(self): 46 if self.url_name == 'module-detail': 47 return 'a4modules/module_detail.html' 48 if self.url_name == 'chapter-detail': 49 if self.module.is_in_module_cluster: 50 return 'a4modules/module_detail.html' 51 return 'a4projects/project_detail.html' 52 53 54 class DocumentDetailView(ChapterDetailView): 55 get_context_from_object = False 56 57 def get_object(self): 58 first_chapter = models.Chapter.objects \ 59 .filter(module=self.module) \ 60 .first() 61 62 if not first_chapter: 63 raise Http404(_('Document has no chapters defined.')) 64 return first_chapter 65 66 67 class ParagraphDetailView(ProjectMixin, 68 rules_mixins.PermissionRequiredMixin, 69 generic.DetailView): 70 model = models.Paragraph 71 permission_required = 'meinberlin_documents.view_paragraph' 72 73 74 class DocumentDashboardExportView(DashboardExportView): 75 template_name = 'meinberlin_exports/export_dashboard.html' 76 77 def get_context_data(self, **kwargs): 78 context = super().get_context_data(**kwargs) 79 context['comment_export'] = reverse( 80 'a4dashboard:document-comment-export', 81 kwargs={'module_slug': self.module.slug}) 82 return context 83 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/meinberlin/apps/documents/views.py b/meinberlin/apps/documents/views.py --- a/meinberlin/apps/documents/views.py +++ b/meinberlin/apps/documents/views.py @@ -1,7 +1,5 @@ -from django.http import Http404 from django.urls import reverse from django.utils.functional import cached_property -from django.utils.translation import ugettext_lazy as _ from django.views import generic from adhocracy4.dashboard import mixins as dashboard_mixins @@ -58,9 +56,6 @@ first_chapter = models.Chapter.objects \ .filter(module=self.module) \ .first() - - if not first_chapter: - raise Http404(_('Document has no chapters defined.')) return first_chapter
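A behavioral detail behind this diff: Django's `QuerySet.first()` returns `None` on an empty queryset instead of raising, so once the explicit `Http404` is dropped, `get_object()` quietly yields `None` for a document that has no chapters yet. The fragment below is excerpted logic rather than a standalone script; `models` and `module` are the names from the view above.

```python
# QuerySet.first() never raises on an empty result set:
chapters = models.Chapter.objects.filter(module=module)
first_chapter = chapters.first()  # None when no chapter exists yet
# The patched get_object() returns this None instead of aborting the
# dashboard preview with an error response.
```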
{"golden_diff": "diff --git a/meinberlin/apps/documents/views.py b/meinberlin/apps/documents/views.py\n--- a/meinberlin/apps/documents/views.py\n+++ b/meinberlin/apps/documents/views.py\n@@ -1,7 +1,5 @@\n-from django.http import Http404\n from django.urls import reverse\n from django.utils.functional import cached_property\n-from django.utils.translation import ugettext_lazy as _\n from django.views import generic\n \n from adhocracy4.dashboard import mixins as dashboard_mixins\n@@ -58,9 +56,6 @@\n first_chapter = models.Chapter.objects \\\n .filter(module=self.module) \\\n .first()\n-\n- if not first_chapter:\n- raise Http404(_('Document has no chapters defined.'))\n return first_chapter\n", "issue": "preview in text commenting does not work\nURL: https://meinberlin-dev.liqd.net/dashboard/projects/textkommentierung/basic/\r\nuser: initiator\r\nexpected behaviour: should be able to see preview\r\nbehaviour: get 405 error\r\nComment/Question: I checked brainstorming and it works. After I enter content for the document it also works, so I guess it has to do with the module text commenting. We also have the problem on prod, so I guess we need a hot fix.\n", "before_files": [{"content": "from django.http import Http404\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.dashboard import mixins as dashboard_mixins\nfrom adhocracy4.projects.mixins import DisplayProjectOrModuleMixin\nfrom adhocracy4.projects.mixins import ProjectMixin\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom meinberlin.apps.exports.views import DashboardExportView\n\nfrom . import models\n\n\nclass DocumentDashboardView(ProjectMixin,\n dashboard_mixins.DashboardBaseMixin,\n dashboard_mixins.DashboardComponentMixin,\n generic.TemplateView):\n template_name = 'meinberlin_documents/document_dashboard.html'\n permission_required = 'a4projects.change_project'\n\n def get_permission_object(self):\n return self.project\n\n\nclass ChapterDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView,\n DisplayProjectOrModuleMixin):\n model = models.Chapter\n permission_required = 'meinberlin_documents.view_chapter'\n get_context_from_object = True\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['chapter_list'] = self.chapter_list\n return context\n\n @property\n def chapter_list(self):\n return models.Chapter.objects.filter(module=self.module)\n\n @cached_property\n def extends(self):\n if self.url_name == 'module-detail':\n return 'a4modules/module_detail.html'\n if self.url_name == 'chapter-detail':\n if self.module.is_in_module_cluster:\n return 'a4modules/module_detail.html'\n return 'a4projects/project_detail.html'\n\n\nclass DocumentDetailView(ChapterDetailView):\n get_context_from_object = False\n\n def get_object(self):\n first_chapter = models.Chapter.objects \\\n .filter(module=self.module) \\\n .first()\n\n if not first_chapter:\n raise Http404(_('Document has no chapters defined.'))\n return first_chapter\n\n\nclass ParagraphDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView):\n model = models.Paragraph\n permission_required = 'meinberlin_documents.view_paragraph'\n\n\nclass DocumentDashboardExportView(DashboardExportView):\n template_name = 'meinberlin_exports/export_dashboard.html'\n\n def get_context_data(self, **kwargs):\n context = 
super().get_context_data(**kwargs)\n context['comment_export'] = reverse(\n 'a4dashboard:document-comment-export',\n kwargs={'module_slug': self.module.slug})\n return context\n", "path": "meinberlin/apps/documents/views.py"}], "after_files": [{"content": "from django.urls import reverse\nfrom django.utils.functional import cached_property\nfrom django.views import generic\n\nfrom adhocracy4.dashboard import mixins as dashboard_mixins\nfrom adhocracy4.projects.mixins import DisplayProjectOrModuleMixin\nfrom adhocracy4.projects.mixins import ProjectMixin\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom meinberlin.apps.exports.views import DashboardExportView\n\nfrom . import models\n\n\nclass DocumentDashboardView(ProjectMixin,\n dashboard_mixins.DashboardBaseMixin,\n dashboard_mixins.DashboardComponentMixin,\n generic.TemplateView):\n template_name = 'meinberlin_documents/document_dashboard.html'\n permission_required = 'a4projects.change_project'\n\n def get_permission_object(self):\n return self.project\n\n\nclass ChapterDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView,\n DisplayProjectOrModuleMixin):\n model = models.Chapter\n permission_required = 'meinberlin_documents.view_chapter'\n get_context_from_object = True\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['chapter_list'] = self.chapter_list\n return context\n\n @property\n def chapter_list(self):\n return models.Chapter.objects.filter(module=self.module)\n\n @cached_property\n def extends(self):\n if self.url_name == 'module-detail':\n return 'a4modules/module_detail.html'\n if self.url_name == 'chapter-detail':\n if self.module.is_in_module_cluster:\n return 'a4modules/module_detail.html'\n return 'a4projects/project_detail.html'\n\n\nclass DocumentDetailView(ChapterDetailView):\n get_context_from_object = False\n\n def get_object(self):\n first_chapter = models.Chapter.objects \\\n .filter(module=self.module) \\\n .first()\n return first_chapter\n\n\nclass ParagraphDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView):\n model = models.Paragraph\n permission_required = 'meinberlin_documents.view_paragraph'\n\n\nclass DocumentDashboardExportView(DashboardExportView):\n template_name = 'meinberlin_exports/export_dashboard.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['comment_export'] = reverse(\n 'a4dashboard:document-comment-export',\n kwargs={'module_slug': self.module.slug})\n return context\n", "path": "meinberlin/apps/documents/views.py"}]}
1,085
170
gh_patches_debug_598
rasdani/github-patches
git_diff
pex-tool__pex-1733
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Release 2.1.82 On the docket: + [x] Pex resolve checking does not allow resolved pre-releases when --no-pre. #1730 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pex/version.py` Content: ``` 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.81" 5 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.81" +__version__ = "2.1.82"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.81\"\n+__version__ = \"2.1.82\"\n", "issue": "Release 2.1.82\nOn the docket:\r\n+ [x] Pex resolve checking does not allow resolved pre-releases when --no-pre. #1730 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.81\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.82\"\n", "path": "pex/version.py"}]}
347
96
gh_patches_debug_4478
rasdani/github-patches
git_diff
python-telegram-bot__python-telegram-bot-2168
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- mypy doesn't pick up type hints[BUG] <!-- Thanks for reporting issues of python-telegram-bot! Use this template to notify us if you found a bug. To make it easier for us to help you please enter detailed information below. Please note, we only support the latest version of python-telegram-bot and master branch. Please make sure to upgrade & recreate the issue on the latest version prior to opening an issue. --> ### Steps to reproduce 1. import python-telegram-bot modules into your `test.py` file (e.g. `from telegram import InlineKeyboardMarkup`) 2. run `mypy test.py` 3. receive the following output `error: Skipping analyzing 'telegram': found module but no type hints or library stubs` ### Expected behaviour mypy should pickup the typehints as they've been added in the latest release (version `13` ) ### Actual behaviour mypy doesn't pickup the typehints. I'm confident this can be solved by either 1. adding `python-telegram-bot` to `typeshed` 2. adding a `py.typed` file More information can be found [here.](https://stackoverflow.com/questions/60856237/mypy-cant-find-type-hints-for-black) ### Configuration **Operating System:** Ubuntu 18.04 **Version of Python, python-telegram-bot & dependencies:** ``$ python -m telegram`` python-telegram-bot 13.0 certifi 2020.06.20 Python 3.8.0 (default, Oct 30 2020, 19:06:04) [GCC 7.5.0] ### Logs ``` error: Skipping analyzing 'telegram.ext': found module but no type hints or library stubs note: See https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports error: Skipping analyzing 'telegram': found module but no type hints or library stubs error: Skipping analyzing 'telegram.ext': found module but no type hints or library stubs error: Skipping analyzing 'telegram': found module but no type hints or library stubs ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 """The setup and build script for the python-telegram-bot library.""" 3 4 import codecs 5 import os 6 import sys 7 8 from setuptools import setup, find_packages 9 10 11 def requirements(): 12 """Build the requirements list for this project""" 13 requirements_list = [] 14 15 with open('requirements.txt') as requirements: 16 for install in requirements: 17 requirements_list.append(install.strip()) 18 19 return requirements_list 20 21 22 packages = find_packages(exclude=['tests*']) 23 requirements = requirements() 24 25 # Allow for a package install to not use the vendored urllib3 26 UPSTREAM_URLLIB3_FLAG = '--with-upstream-urllib3' 27 if UPSTREAM_URLLIB3_FLAG in sys.argv: 28 sys.argv.remove(UPSTREAM_URLLIB3_FLAG) 29 requirements.append('urllib3 >= 1.19.1') 30 packages = [x for x in packages if not x.startswith('telegram.vendor.ptb_urllib3')] 31 32 with codecs.open('README.rst', 'r', 'utf-8') as fd: 33 fn = os.path.join('telegram', 'version.py') 34 with open(fn) as fh: 35 code = compile(fh.read(), fn, 'exec') 36 exec(code) 37 38 setup(name='python-telegram-bot', 39 version=__version__, 40 author='Leandro Toledo', 41 author_email='devs@python-telegram-bot.org', 42 license='LGPLv3', 43 url='https://python-telegram-bot.org/', 44 keywords='python telegram bot api wrapper', 45 description="We have made you a wrapper you can't refuse", 46 long_description=fd.read(), 47 packages=packages, 48 install_requires=requirements, 49 extras_require={ 50 'json': 'ujson', 51 'socks': 'PySocks' 52 }, 53 include_package_data=True, 54 classifiers=[ 55 'Development Status :: 5 - Production/Stable', 56 'Intended Audience :: Developers', 57 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)', 58 'Operating System :: OS Independent', 59 'Topic :: Software Development :: Libraries :: Python Modules', 60 'Topic :: Communications :: Chat', 61 'Topic :: Internet', 62 'Programming Language :: Python', 63 'Programming Language :: Python :: 3', 64 'Programming Language :: Python :: 3.6', 65 'Programming Language :: Python :: 3.7', 66 'Programming Language :: Python :: 3.8', 67 'Programming Language :: Python :: 3.9', 68 ],) 69 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -45,6 +45,7 @@ description="We have made you a wrapper you can't refuse", long_description=fd.read(), packages=packages, + package_data={'telegram': ['py.typed']}, install_requires=requirements, extras_require={ 'json': 'ujson',
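For background, this fix follows PEP 561: a type checker only trusts an installed package's inline annotations when the distribution ships a `py.typed` marker file, and `package_data` is what carries that marker into the built artifact. A generic sketch follows; the `mylib` name is illustrative, not python-telegram-bot's real layout.

```python
# Minimal PEP 561 wiring for an inline-typed package ("mylib" is
# illustrative).  The py.typed file itself is empty; its presence tells
# type checkers "this package ships its own type information".
from setuptools import setup, find_packages

setup(
    name="mylib",
    packages=find_packages(exclude=["tests*"]),
    package_data={"mylib": ["py.typed"]},  # bundle the marker on install
    zip_safe=False,  # commonly recommended for PEP 561 packages
)
```

After installing a build that includes the marker, `mypy` stops reporting "found module but no type hints or library stubs" for the package.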
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -45,6 +45,7 @@\n description=\"We have made you a wrapper you can't refuse\",\n long_description=fd.read(),\n packages=packages,\n+ package_data={'telegram': ['py.typed']},\n install_requires=requirements,\n extras_require={\n 'json': 'ujson',\n", "issue": "mypy doesn't pick up type hints[BUG]\n<!--\r\nThanks for reporting issues of python-telegram-bot!\r\n\r\nUse this template to notify us if you found a bug.\r\n\r\nTo make it easier for us to help you please enter detailed information below.\r\n\r\nPlease note, we only support the latest version of python-telegram-bot and\r\nmaster branch. Please make sure to upgrade & recreate the issue on the latest\r\nversion prior to opening an issue.\r\n-->\r\n### Steps to reproduce\r\n1. import python-telegram-bot modules into your `test.py` file (e.g. `from telegram import InlineKeyboardMarkup`)\r\n\r\n2. run `mypy test.py`\r\n\r\n3. receive the following output `error: Skipping analyzing 'telegram': found module but no type hints or library stubs`\r\n\r\n### Expected behaviour\r\nmypy should pickup the typehints as they've been added in the latest release (version `13` )\r\n\r\n### Actual behaviour\r\nmypy doesn't pickup the typehints.\r\n\r\nI'm confident this can be solved by either\r\n\r\n1. adding `python-telegram-bot` to `typeshed` \r\n2. adding a `py.typed` file\r\n\r\nMore information can be found [here.](https://stackoverflow.com/questions/60856237/mypy-cant-find-type-hints-for-black)\r\n\r\n### Configuration\r\n**Operating System:**\r\nUbuntu 18.04\r\n\r\n**Version of Python, python-telegram-bot & dependencies:**\r\n\r\n``$ python -m telegram``\r\npython-telegram-bot 13.0\r\ncertifi 2020.06.20\r\nPython 3.8.0 (default, Oct 30 2020, 19:06:04) [GCC 7.5.0]\r\n### Logs\r\n```\r\nerror: Skipping analyzing 'telegram.ext': found module but no type hints or library stubs\r\nnote: See https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports\r\nerror: Skipping analyzing 'telegram': found module but no type hints or library stubs\r\nerror: Skipping analyzing 'telegram.ext': found module but no type hints or library stubs\r\nerror: Skipping analyzing 'telegram': found module but no type hints or library stubs\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"The setup and build script for the python-telegram-bot library.\"\"\"\n\nimport codecs\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\ndef requirements():\n \"\"\"Build the requirements list for this project\"\"\"\n requirements_list = []\n\n with open('requirements.txt') as requirements:\n for install in requirements:\n requirements_list.append(install.strip())\n\n return requirements_list\n\n\npackages = find_packages(exclude=['tests*'])\nrequirements = requirements()\n\n# Allow for a package install to not use the vendored urllib3\nUPSTREAM_URLLIB3_FLAG = '--with-upstream-urllib3'\nif UPSTREAM_URLLIB3_FLAG in sys.argv:\n sys.argv.remove(UPSTREAM_URLLIB3_FLAG)\n requirements.append('urllib3 >= 1.19.1')\n packages = [x for x in packages if not x.startswith('telegram.vendor.ptb_urllib3')]\n\nwith codecs.open('README.rst', 'r', 'utf-8') as fd:\n fn = os.path.join('telegram', 'version.py')\n with open(fn) as fh:\n code = compile(fh.read(), fn, 'exec')\n exec(code)\n\n setup(name='python-telegram-bot',\n version=__version__,\n author='Leandro Toledo',\n author_email='devs@python-telegram-bot.org',\n license='LGPLv3',\n 
url='https://python-telegram-bot.org/',\n keywords='python telegram bot api wrapper',\n description=\"We have made you a wrapper you can't refuse\",\n long_description=fd.read(),\n packages=packages,\n install_requires=requirements,\n extras_require={\n 'json': 'ujson',\n 'socks': 'PySocks'\n },\n include_package_data=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Communications :: Chat',\n 'Topic :: Internet',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"The setup and build script for the python-telegram-bot library.\"\"\"\n\nimport codecs\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\ndef requirements():\n \"\"\"Build the requirements list for this project\"\"\"\n requirements_list = []\n\n with open('requirements.txt') as requirements:\n for install in requirements:\n requirements_list.append(install.strip())\n\n return requirements_list\n\n\npackages = find_packages(exclude=['tests*'])\nrequirements = requirements()\n\n# Allow for a package install to not use the vendored urllib3\nUPSTREAM_URLLIB3_FLAG = '--with-upstream-urllib3'\nif UPSTREAM_URLLIB3_FLAG in sys.argv:\n sys.argv.remove(UPSTREAM_URLLIB3_FLAG)\n requirements.append('urllib3 >= 1.19.1')\n packages = [x for x in packages if not x.startswith('telegram.vendor.ptb_urllib3')]\n\nwith codecs.open('README.rst', 'r', 'utf-8') as fd:\n fn = os.path.join('telegram', 'version.py')\n with open(fn) as fh:\n code = compile(fh.read(), fn, 'exec')\n exec(code)\n\n setup(name='python-telegram-bot',\n version=__version__,\n author='Leandro Toledo',\n author_email='devs@python-telegram-bot.org',\n license='LGPLv3',\n url='https://python-telegram-bot.org/',\n keywords='python telegram bot api wrapper',\n description=\"We have made you a wrapper you can't refuse\",\n long_description=fd.read(),\n packages=packages,\n package_data={'telegram': ['py.typed']},\n install_requires=requirements,\n extras_require={\n 'json': 'ujson',\n 'socks': 'PySocks'\n },\n include_package_data=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Communications :: Chat',\n 'Topic :: Internet',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],)\n", "path": "setup.py"}]}
1,387
88
gh_patches_debug_4094
rasdani/github-patches
git_diff
urllib3__urllib3-841
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- PyOpenSSL: Fails to import after installation with the provided instructions The [Security documentation page](https://urllib3.readthedocs.org/en/latest/security.html#openssl-pyopenssl) indicates that to use PyOpenSSL that you must first install: - pip3 install pyopenssl ndg-httpsclient pyasn1 And then in a Python interpreter run: - import urllib3.contrib.pyopenssl - urllib3.contrib.pyopenssl.inject_into_urllib3() However on Python 3.4.4 (the latest 3.4.x) on OS X 10.11 (the latest OS X) I get the following traceback when executing `import urllib3.contrib.pyopenssl`: ``` Traceback (most recent call last): File "/Users/davidf/Projects/webcrystal/venv/lib/python3.4/site-packages/urllib3/contrib/pyopenssl.py", line 60, in <module> from socket import _fileobject ImportError: cannot import name '_fileobject' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/davidf/Projects/webcrystal/venv/lib/python3.4/site-packages/urllib3/contrib/pyopenssl.py", line 63, in <module> from urllib3.packages.backports.makefile import backport_makefile ImportError: No module named 'urllib3.packages.backports' ``` Perhaps there is some missing step in the documentation? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 from setuptools import setup 4 5 import os 6 import re 7 import codecs 8 9 base_path = os.path.dirname(__file__) 10 11 # Get the version (borrowed from SQLAlchemy) 12 fp = open(os.path.join(base_path, 'urllib3', '__init__.py')) 13 VERSION = re.compile(r".*__version__ = '(.*?)'", 14 re.S).match(fp.read()).group(1) 15 fp.close() 16 17 readme = codecs.open('README.rst', encoding='utf-8').read() 18 changes = codecs.open('CHANGES.rst', encoding='utf-8').read() 19 version = VERSION 20 21 setup(name='urllib3', 22 version=version, 23 description="HTTP library with thread-safe connection pooling, file post, and more.", 24 long_description=u'\n\n'.join([readme, changes]), 25 classifiers=[ 26 'Environment :: Web Environment', 27 'Intended Audience :: Developers', 28 'License :: OSI Approved :: MIT License', 29 'Operating System :: OS Independent', 30 'Programming Language :: Python', 31 'Programming Language :: Python :: 2', 32 'Programming Language :: Python :: 3', 33 'Topic :: Internet :: WWW/HTTP', 34 'Topic :: Software Development :: Libraries', 35 ], 36 keywords='urllib httplib threadsafe filepost http https ssl pooling', 37 author='Andrey Petrov', 38 author_email='andrey.petrov@shazow.net', 39 url='http://urllib3.readthedocs.org/', 40 license='MIT', 41 packages=['urllib3', 42 'urllib3.packages', 'urllib3.packages.ssl_match_hostname', 43 'urllib3.contrib', 'urllib3.util', 44 ], 45 requires=[], 46 tests_require=[ 47 # These are a less-specific subset of dev-requirements.txt, for the 48 # convenience of distro package maintainers. 49 'nose', 50 'mock', 51 'tornado', 52 ], 53 test_suite='test', 54 extras_require={ 55 'secure': [ 56 'pyOpenSSL>=0.13', 57 'ndg-httpsclient', 58 'pyasn1', 59 'certifi', 60 ], 61 'socks': [ 62 'PySocks>=1.5.6,<2.0', 63 ] 64 }, 65 ) 66 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -40,7 +40,8 @@ license='MIT', packages=['urllib3', 'urllib3.packages', 'urllib3.packages.ssl_match_hostname', - 'urllib3.contrib', 'urllib3.util', + 'urllib3.packages.backports', 'urllib3.contrib', + 'urllib3.util', ], requires=[], tests_require=[
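The underlying failure mode: `setup()` installs only the packages it is explicitly told about, so the new `urllib3/packages/backports/` directory never reached site-packages and the `import urllib3.contrib.pyopenssl` chain broke. A hedged alternative to hand-maintained package lists (not what this patch does) is automatic discovery:

```python
# find_packages() returns every directory under the tree that contains an
# __init__.py, so a newly added subpackage is picked up automatically.
from setuptools import find_packages

packages = find_packages(include=["urllib3", "urllib3.*"])
# expected to contain, among others:
#   'urllib3.packages.backports'  (the module the traceback could not find)
```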
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -40,7 +40,8 @@\n license='MIT',\n packages=['urllib3',\n 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',\n- 'urllib3.contrib', 'urllib3.util',\n+ 'urllib3.packages.backports', 'urllib3.contrib',\n+ 'urllib3.util',\n ],\n requires=[],\n tests_require=[\n", "issue": "PyOpenSSL: Fails to import after installation with the provided instructions\nThe [Security documentation page](https://urllib3.readthedocs.org/en/latest/security.html#openssl-pyopenssl) indicates that to use PyOpenSSL that you must first install:\n- pip3 install pyopenssl ndg-httpsclient pyasn1\n\nAnd then in a Python interpreter run:\n- import urllib3.contrib.pyopenssl\n- urllib3.contrib.pyopenssl.inject_into_urllib3()\n\nHowever on Python 3.4.4 (the latest 3.4.x) on OS X 10.11 (the latest OS X) I get the following traceback when executing `import urllib3.contrib.pyopenssl`:\n\n```\nTraceback (most recent call last):\n File \"/Users/davidf/Projects/webcrystal/venv/lib/python3.4/site-packages/urllib3/contrib/pyopenssl.py\", line 60, in <module>\n from socket import _fileobject\nImportError: cannot import name '_fileobject'\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"/Users/davidf/Projects/webcrystal/venv/lib/python3.4/site-packages/urllib3/contrib/pyopenssl.py\", line 63, in <module>\n from urllib3.packages.backports.makefile import backport_makefile\nImportError: No module named 'urllib3.packages.backports'\n```\n\nPerhaps there is some missing step in the documentation?\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nimport os\nimport re\nimport codecs\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nfp = open(os.path.join(base_path, 'urllib3', '__init__.py'))\nVERSION = re.compile(r\".*__version__ = '(.*?)'\",\n re.S).match(fp.read()).group(1)\nfp.close()\n\nreadme = codecs.open('README.rst', encoding='utf-8').read()\nchanges = codecs.open('CHANGES.rst', encoding='utf-8').read()\nversion = VERSION\n\nsetup(name='urllib3',\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=u'\\n\\n'.join([readme, changes]),\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='urllib httplib threadsafe filepost http https ssl pooling',\n author='Andrey Petrov',\n author_email='andrey.petrov@shazow.net',\n url='http://urllib3.readthedocs.org/',\n license='MIT',\n packages=['urllib3',\n 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',\n 'urllib3.contrib', 'urllib3.util',\n ],\n requires=[],\n tests_require=[\n # These are a less-specific subset of dev-requirements.txt, for the\n # convenience of distro package maintainers.\n 'nose',\n 'mock',\n 'tornado',\n ],\n test_suite='test',\n extras_require={\n 'secure': [\n 'pyOpenSSL>=0.13',\n 'ndg-httpsclient',\n 'pyasn1',\n 'certifi',\n ],\n 'socks': [\n 'PySocks>=1.5.6,<2.0',\n ]\n },\n )\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import 
setup\n\nimport os\nimport re\nimport codecs\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nfp = open(os.path.join(base_path, 'urllib3', '__init__.py'))\nVERSION = re.compile(r\".*__version__ = '(.*?)'\",\n re.S).match(fp.read()).group(1)\nfp.close()\n\nreadme = codecs.open('README.rst', encoding='utf-8').read()\nchanges = codecs.open('CHANGES.rst', encoding='utf-8').read()\nversion = VERSION\n\nsetup(name='urllib3',\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=u'\\n\\n'.join([readme, changes]),\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='urllib httplib threadsafe filepost http https ssl pooling',\n author='Andrey Petrov',\n author_email='andrey.petrov@shazow.net',\n url='http://urllib3.readthedocs.org/',\n license='MIT',\n packages=['urllib3',\n 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',\n 'urllib3.packages.backports', 'urllib3.contrib',\n 'urllib3.util',\n ],\n requires=[],\n tests_require=[\n # These are a less-specific subset of dev-requirements.txt, for the\n # convenience of distro package maintainers.\n 'nose',\n 'mock',\n 'tornado',\n ],\n test_suite='test',\n extras_require={\n 'secure': [\n 'pyOpenSSL>=0.13',\n 'ndg-httpsclient',\n 'pyasn1',\n 'certifi',\n ],\n 'socks': [\n 'PySocks>=1.5.6,<2.0',\n ]\n },\n )\n", "path": "setup.py"}]}
1,193
111
gh_patches_debug_30146
rasdani/github-patches
git_diff
beetbox__beets-4807
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- plugins: add markunmatched ## Description Add a plugin useful for bulk importers. ## To Do - [x] Documentation. (If you've add a new command-line flag, for example, find the appropriate page under `docs/` to describe it.) - [x] Changelog. (Add an entry to `docs/changelog.rst` near the top of the document.) - [ ] Tests. (Encouraged but not strictly required.) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `beetsplug/mbsubmit.py` Content: ``` 1 # This file is part of beets. 2 # Copyright 2016, Adrian Sampson and Diego Moreda. 3 # 4 # Permission is hereby granted, free of charge, to any person obtaining 5 # a copy of this software and associated documentation files (the 6 # "Software"), to deal in the Software without restriction, including 7 # without limitation the rights to use, copy, modify, merge, publish, 8 # distribute, sublicense, and/or sell copies of the Software, and to 9 # permit persons to whom the Software is furnished to do so, subject to 10 # the following conditions: 11 # 12 # The above copyright notice and this permission notice shall be 13 # included in all copies or substantial portions of the Software. 14 15 """Aid in submitting information to MusicBrainz. 16 17 This plugin allows the user to print track information in a format that is 18 parseable by the MusicBrainz track parser [1]. Programmatic submitting is not 19 implemented by MusicBrainz yet. 20 21 [1] https://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings 22 """ 23 24 25 from beets import ui 26 from beets.autotag import Recommendation 27 from beets.plugins import BeetsPlugin 28 from beets.ui.commands import PromptChoice 29 from beetsplug.info import print_data 30 31 32 class MBSubmitPlugin(BeetsPlugin): 33 def __init__(self): 34 super().__init__() 35 36 self.config.add( 37 { 38 "format": "$track. $title - $artist ($length)", 39 "threshold": "medium", 40 } 41 ) 42 43 # Validate and store threshold. 44 self.threshold = self.config["threshold"].as_choice( 45 { 46 "none": Recommendation.none, 47 "low": Recommendation.low, 48 "medium": Recommendation.medium, 49 "strong": Recommendation.strong, 50 } 51 ) 52 53 self.register_listener( 54 "before_choose_candidate", self.before_choose_candidate_event 55 ) 56 57 def before_choose_candidate_event(self, session, task): 58 if task.rec <= self.threshold: 59 return [PromptChoice("p", "Print tracks", self.print_tracks)] 60 61 def print_tracks(self, session, task): 62 for i in sorted(task.items, key=lambda i: i.track): 63 print_data(None, i, self.config["format"].as_str()) 64 65 def commands(self): 66 """Add beet UI commands for mbsubmit.""" 67 mbsubmit_cmd = ui.Subcommand( 68 "mbsubmit", help="Submit Tracks to MusicBrainz" 69 ) 70 71 def func(lib, opts, args): 72 items = lib.items(ui.decargs(args)) 73 self._mbsubmit(items) 74 75 mbsubmit_cmd.func = func 76 77 return [mbsubmit_cmd] 78 79 def _mbsubmit(self, items): 80 """Print track information to be submitted to MusicBrainz.""" 81 for i in sorted(items, key=lambda i: i.track): 82 print_data(None, i, self.config["format"].as_str()) 83 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/beetsplug/mbsubmit.py b/beetsplug/mbsubmit.py --- a/beetsplug/mbsubmit.py +++ b/beetsplug/mbsubmit.py @@ -21,11 +21,13 @@ [1] https://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings """ +import subprocess from beets import ui from beets.autotag import Recommendation from beets.plugins import BeetsPlugin from beets.ui.commands import PromptChoice +from beets.util import displayable_path from beetsplug.info import print_data @@ -37,6 +39,7 @@ { "format": "$track. $title - $artist ($length)", "threshold": "medium", + "picard_path": "picard", } ) @@ -56,7 +59,21 @@ def before_choose_candidate_event(self, session, task): if task.rec <= self.threshold: - return [PromptChoice("p", "Print tracks", self.print_tracks)] + return [ + PromptChoice("p", "Print tracks", self.print_tracks), + PromptChoice("o", "Open files with Picard", self.picard), + ] + + def picard(self, session, task): + paths = [] + for p in task.paths: + paths.append(displayable_path(p)) + try: + picard_path = self.config["picard_path"].as_str() + subprocess.Popen([picard_path] + paths) + self._log.info("launched picard from\n{}", picard_path) + except OSError as exc: + self._log.error(f"Could not open picard, got error:\n{exc}") def print_tracks(self, session, task): for i in sorted(task.items, key=lambda i: i.track):
{"golden_diff": "diff --git a/beetsplug/mbsubmit.py b/beetsplug/mbsubmit.py\n--- a/beetsplug/mbsubmit.py\n+++ b/beetsplug/mbsubmit.py\n@@ -21,11 +21,13 @@\n [1] https://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings\n \"\"\"\n \n+import subprocess\n \n from beets import ui\n from beets.autotag import Recommendation\n from beets.plugins import BeetsPlugin\n from beets.ui.commands import PromptChoice\n+from beets.util import displayable_path\n from beetsplug.info import print_data\n \n \n@@ -37,6 +39,7 @@\n {\n \"format\": \"$track. $title - $artist ($length)\",\n \"threshold\": \"medium\",\n+ \"picard_path\": \"picard\",\n }\n )\n \n@@ -56,7 +59,21 @@\n \n def before_choose_candidate_event(self, session, task):\n if task.rec <= self.threshold:\n- return [PromptChoice(\"p\", \"Print tracks\", self.print_tracks)]\n+ return [\n+ PromptChoice(\"p\", \"Print tracks\", self.print_tracks),\n+ PromptChoice(\"o\", \"Open files with Picard\", self.picard),\n+ ]\n+\n+ def picard(self, session, task):\n+ paths = []\n+ for p in task.paths:\n+ paths.append(displayable_path(p))\n+ try:\n+ picard_path = self.config[\"picard_path\"].as_str()\n+ subprocess.Popen([picard_path] + paths)\n+ self._log.info(\"launched picard from\\n{}\", picard_path)\n+ except OSError as exc:\n+ self._log.error(f\"Could not open picard, got error:\\n{exc}\")\n \n def print_tracks(self, session, task):\n for i in sorted(task.items, key=lambda i: i.track):\n", "issue": "plugins: add markunmatched\n## Description\r\n\r\nAdd a plugin useful for bulk importers.\r\n\r\n## To Do\r\n\r\n- [x] Documentation. (If you've add a new command-line flag, for example, find the appropriate page under `docs/` to describe it.)\r\n- [x] Changelog. (Add an entry to `docs/changelog.rst` near the top of the document.)\r\n- [ ] Tests. (Encouraged but not strictly required.)\r\n\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2016, Adrian Sampson and Diego Moreda.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Aid in submitting information to MusicBrainz.\n\nThis plugin allows the user to print track information in a format that is\nparseable by the MusicBrainz track parser [1]. Programmatic submitting is not\nimplemented by MusicBrainz yet.\n\n[1] https://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings\n\"\"\"\n\n\nfrom beets import ui\nfrom beets.autotag import Recommendation\nfrom beets.plugins import BeetsPlugin\nfrom beets.ui.commands import PromptChoice\nfrom beetsplug.info import print_data\n\n\nclass MBSubmitPlugin(BeetsPlugin):\n def __init__(self):\n super().__init__()\n\n self.config.add(\n {\n \"format\": \"$track. 
$title - $artist ($length)\",\n \"threshold\": \"medium\",\n }\n )\n\n # Validate and store threshold.\n self.threshold = self.config[\"threshold\"].as_choice(\n {\n \"none\": Recommendation.none,\n \"low\": Recommendation.low,\n \"medium\": Recommendation.medium,\n \"strong\": Recommendation.strong,\n }\n )\n\n self.register_listener(\n \"before_choose_candidate\", self.before_choose_candidate_event\n )\n\n def before_choose_candidate_event(self, session, task):\n if task.rec <= self.threshold:\n return [PromptChoice(\"p\", \"Print tracks\", self.print_tracks)]\n\n def print_tracks(self, session, task):\n for i in sorted(task.items, key=lambda i: i.track):\n print_data(None, i, self.config[\"format\"].as_str())\n\n def commands(self):\n \"\"\"Add beet UI commands for mbsubmit.\"\"\"\n mbsubmit_cmd = ui.Subcommand(\n \"mbsubmit\", help=\"Submit Tracks to MusicBrainz\"\n )\n\n def func(lib, opts, args):\n items = lib.items(ui.decargs(args))\n self._mbsubmit(items)\n\n mbsubmit_cmd.func = func\n\n return [mbsubmit_cmd]\n\n def _mbsubmit(self, items):\n \"\"\"Print track information to be submitted to MusicBrainz.\"\"\"\n for i in sorted(items, key=lambda i: i.track):\n print_data(None, i, self.config[\"format\"].as_str())\n", "path": "beetsplug/mbsubmit.py"}], "after_files": [{"content": "# This file is part of beets.\n# Copyright 2016, Adrian Sampson and Diego Moreda.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Aid in submitting information to MusicBrainz.\n\nThis plugin allows the user to print track information in a format that is\nparseable by the MusicBrainz track parser [1]. Programmatic submitting is not\nimplemented by MusicBrainz yet.\n\n[1] https://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings\n\"\"\"\n\nimport subprocess\n\nfrom beets import ui\nfrom beets.autotag import Recommendation\nfrom beets.plugins import BeetsPlugin\nfrom beets.ui.commands import PromptChoice\nfrom beets.util import displayable_path\nfrom beetsplug.info import print_data\n\n\nclass MBSubmitPlugin(BeetsPlugin):\n def __init__(self):\n super().__init__()\n\n self.config.add(\n {\n \"format\": \"$track. 
$title - $artist ($length)\",\n \"threshold\": \"medium\",\n \"picard_path\": \"picard\",\n }\n )\n\n # Validate and store threshold.\n self.threshold = self.config[\"threshold\"].as_choice(\n {\n \"none\": Recommendation.none,\n \"low\": Recommendation.low,\n \"medium\": Recommendation.medium,\n \"strong\": Recommendation.strong,\n }\n )\n\n self.register_listener(\n \"before_choose_candidate\", self.before_choose_candidate_event\n )\n\n def before_choose_candidate_event(self, session, task):\n if task.rec <= self.threshold:\n return [\n PromptChoice(\"p\", \"Print tracks\", self.print_tracks),\n PromptChoice(\"o\", \"Open files with Picard\", self.picard),\n ]\n\n def picard(self, session, task):\n paths = []\n for p in task.paths:\n paths.append(displayable_path(p))\n try:\n picard_path = self.config[\"picard_path\"].as_str()\n subprocess.Popen([picard_path] + paths)\n self._log.info(\"launched picard from\\n{}\", picard_path)\n except OSError as exc:\n self._log.error(f\"Could not open picard, got error:\\n{exc}\")\n\n def print_tracks(self, session, task):\n for i in sorted(task.items, key=lambda i: i.track):\n print_data(None, i, self.config[\"format\"].as_str())\n\n def commands(self):\n \"\"\"Add beet UI commands for mbsubmit.\"\"\"\n mbsubmit_cmd = ui.Subcommand(\n \"mbsubmit\", help=\"Submit Tracks to MusicBrainz\"\n )\n\n def func(lib, opts, args):\n items = lib.items(ui.decargs(args))\n self._mbsubmit(items)\n\n mbsubmit_cmd.func = func\n\n return [mbsubmit_cmd]\n\n def _mbsubmit(self, items):\n \"\"\"Print track information to be submitted to MusicBrainz.\"\"\"\n for i in sorted(items, key=lambda i: i.track):\n print_data(None, i, self.config[\"format\"].as_str())\n", "path": "beetsplug/mbsubmit.py"}]}
1,138
415
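For context on the patch above: the new prompt choice reduces to launching an external tagger on the selected paths without blocking the importer. A minimal standalone sketch of that pattern, kept outside beets; the `picard` executable name and the sample path are assumptions, not part of the beets API:

```python
import subprocess


def open_with_picard(paths, picard_path="picard"):
    """Launch MusicBrainz Picard on the given file paths.

    Uses subprocess.Popen rather than subprocess.run so the caller
    (e.g. an interactive import session) is not blocked while the
    Picard window is open.
    """
    try:
        return subprocess.Popen([picard_path] + list(paths))
    except OSError as exc:
        # Mirrors the patch's error handling: report, don't crash.
        print(f"Could not open picard, got error:\n{exc}")
        return None


if __name__ == "__main__":
    open_with_picard(["/music/album/01 - track.flac"])  # hypothetical path
```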
gh_patches_debug_2187
rasdani/github-patches
git_diff
projectmesa__mesa-989
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Color patches takes a long time to load When you hit "reset" to load the color patches example, it takes a long time to load. Not sure why. As a result, I thought it was broken. To recreate... ``` cd examples/color_patches python run.py ``` Wait for patches to load when browser window pops up. OR hit reset and wait for color patches to load. This is what it should look something like... <img width="407" alt="screen shot 2018-04-01 at 10 03 33 pm" src="https://user-images.githubusercontent.com/166734/38180194-95c2acb0-35f8-11e8-8c1b-8bd7a6d25098.png"> Color patches takes a long time to load When you hit "reset" to load the color patches example, it takes a long time to load. Not sure why. As a result, I thought it was broken. To recreate... ``` cd examples/color_patches python run.py ``` Wait for patches to load when browser window pops up. OR hit reset and wait for color patches to load. This is what it should look something like... <img width="407" alt="screen shot 2018-04-01 at 10 03 33 pm" src="https://user-images.githubusercontent.com/166734/38180194-95c2acb0-35f8-11e8-8c1b-8bd7a6d25098.png"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/color_patches/color_patches/server.py` Content: ``` 1 """ 2 handles the definition of the canvas parameters and 3 the drawing of the model representation on the canvas 4 """ 5 # import webbrowser 6 7 from mesa.visualization.modules import CanvasGrid 8 from mesa.visualization.ModularVisualization import ModularServer 9 10 from .model import ColorPatches 11 12 _COLORS = [ 13 "Aqua", 14 "Blue", 15 "Fuchsia", 16 "Gray", 17 "Green", 18 "Lime", 19 "Maroon", 20 "Navy", 21 "Olive", 22 "Orange", 23 "Purple", 24 "Red", 25 "Silver", 26 "Teal", 27 "White", 28 "Yellow", 29 ] 30 31 32 grid_rows = 50 33 grid_cols = 25 34 cell_size = 10 35 canvas_width = grid_rows * cell_size 36 canvas_height = grid_cols * cell_size 37 38 39 def color_patch_draw(cell): 40 """ 41 This function is registered with the visualization server to be called 42 each tick to indicate how to draw the cell in its current state. 43 44 :param cell: the cell in the simulation 45 46 :return: the portrayal dictionary. 47 48 """ 49 assert cell is not None 50 portrayal = {"Shape": "rect", "w": 1, "h": 1, "Filled": "true", "Layer": 0} 51 portrayal["x"] = cell.get_row() 52 portrayal["y"] = cell.get_col() 53 portrayal["Color"] = _COLORS[cell.get_state()] 54 return portrayal 55 56 57 canvas_element = CanvasGrid( 58 color_patch_draw, grid_rows, grid_cols, canvas_width, canvas_height 59 ) 60 61 server = ModularServer( 62 ColorPatches, 63 [canvas_element], 64 "Color Patches", 65 {"width": canvas_width, "height": canvas_height}, 66 ) 67 68 # webbrowser.open('http://127.0.0.1:8521') # TODO: make this configurable 69 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/color_patches/color_patches/server.py b/examples/color_patches/color_patches/server.py --- a/examples/color_patches/color_patches/server.py +++ b/examples/color_patches/color_patches/server.py @@ -62,7 +62,7 @@ ColorPatches, [canvas_element], "Color Patches", - {"width": canvas_width, "height": canvas_height}, + {"width": grid_rows, "height": grid_cols}, ) # webbrowser.open('http://127.0.0.1:8521') # TODO: make this configurable
{"golden_diff": "diff --git a/examples/color_patches/color_patches/server.py b/examples/color_patches/color_patches/server.py\n--- a/examples/color_patches/color_patches/server.py\n+++ b/examples/color_patches/color_patches/server.py\n@@ -62,7 +62,7 @@\n ColorPatches,\n [canvas_element],\n \"Color Patches\",\n- {\"width\": canvas_width, \"height\": canvas_height},\n+ {\"width\": grid_rows, \"height\": grid_cols},\n )\n \n # webbrowser.open('http://127.0.0.1:8521') # TODO: make this configurable\n", "issue": "Color patches takes a long time to load\nWhen you hit \"reset\" to load the color patches example, it takes a long time to load.\r\nNot sure why. As a result, I thought it was broken.\r\n\r\nTo recreate... \r\n```\r\ncd examples/color_patches\r\npython run.py\r\n```\r\n\r\nWait for patches to load when browser window pops up. \r\nOR hit reset and wait for color patches to load.\r\n\r\nThis is what it should look something like...\r\n<img width=\"407\" alt=\"screen shot 2018-04-01 at 10 03 33 pm\" src=\"https://user-images.githubusercontent.com/166734/38180194-95c2acb0-35f8-11e8-8c1b-8bd7a6d25098.png\">\nColor patches takes a long time to load\nWhen you hit \"reset\" to load the color patches example, it takes a long time to load.\r\nNot sure why. As a result, I thought it was broken.\r\n\r\nTo recreate... \r\n```\r\ncd examples/color_patches\r\npython run.py\r\n```\r\n\r\nWait for patches to load when browser window pops up. \r\nOR hit reset and wait for color patches to load.\r\n\r\nThis is what it should look something like...\r\n<img width=\"407\" alt=\"screen shot 2018-04-01 at 10 03 33 pm\" src=\"https://user-images.githubusercontent.com/166734/38180194-95c2acb0-35f8-11e8-8c1b-8bd7a6d25098.png\">\n", "before_files": [{"content": "\"\"\"\nhandles the definition of the canvas parameters and\nthe drawing of the model representation on the canvas\n\"\"\"\n# import webbrowser\n\nfrom mesa.visualization.modules import CanvasGrid\nfrom mesa.visualization.ModularVisualization import ModularServer\n\nfrom .model import ColorPatches\n\n_COLORS = [\n \"Aqua\",\n \"Blue\",\n \"Fuchsia\",\n \"Gray\",\n \"Green\",\n \"Lime\",\n \"Maroon\",\n \"Navy\",\n \"Olive\",\n \"Orange\",\n \"Purple\",\n \"Red\",\n \"Silver\",\n \"Teal\",\n \"White\",\n \"Yellow\",\n]\n\n\ngrid_rows = 50\ngrid_cols = 25\ncell_size = 10\ncanvas_width = grid_rows * cell_size\ncanvas_height = grid_cols * cell_size\n\n\ndef color_patch_draw(cell):\n \"\"\"\n This function is registered with the visualization server to be called\n each tick to indicate how to draw the cell in its current state.\n\n :param cell: the cell in the simulation\n\n :return: the portrayal dictionary.\n\n \"\"\"\n assert cell is not None\n portrayal = {\"Shape\": \"rect\", \"w\": 1, \"h\": 1, \"Filled\": \"true\", \"Layer\": 0}\n portrayal[\"x\"] = cell.get_row()\n portrayal[\"y\"] = cell.get_col()\n portrayal[\"Color\"] = _COLORS[cell.get_state()]\n return portrayal\n\n\ncanvas_element = CanvasGrid(\n color_patch_draw, grid_rows, grid_cols, canvas_width, canvas_height\n)\n\nserver = ModularServer(\n ColorPatches,\n [canvas_element],\n \"Color Patches\",\n {\"width\": canvas_width, \"height\": canvas_height},\n)\n\n# webbrowser.open('http://127.0.0.1:8521') # TODO: make this configurable\n", "path": "examples/color_patches/color_patches/server.py"}], "after_files": [{"content": "\"\"\"\nhandles the definition of the canvas parameters and\nthe drawing of the model representation on the canvas\n\"\"\"\n# import webbrowser\n\nfrom 
mesa.visualization.modules import CanvasGrid\nfrom mesa.visualization.ModularVisualization import ModularServer\n\nfrom .model import ColorPatches\n\n_COLORS = [\n \"Aqua\",\n \"Blue\",\n \"Fuchsia\",\n \"Gray\",\n \"Green\",\n \"Lime\",\n \"Maroon\",\n \"Navy\",\n \"Olive\",\n \"Orange\",\n \"Purple\",\n \"Red\",\n \"Silver\",\n \"Teal\",\n \"White\",\n \"Yellow\",\n]\n\n\ngrid_rows = 50\ngrid_cols = 25\ncell_size = 10\ncanvas_width = grid_rows * cell_size\ncanvas_height = grid_cols * cell_size\n\n\ndef color_patch_draw(cell):\n \"\"\"\n This function is registered with the visualization server to be called\n each tick to indicate how to draw the cell in its current state.\n\n :param cell: the cell in the simulation\n\n :return: the portrayal dictionary.\n\n \"\"\"\n assert cell is not None\n portrayal = {\"Shape\": \"rect\", \"w\": 1, \"h\": 1, \"Filled\": \"true\", \"Layer\": 0}\n portrayal[\"x\"] = cell.get_row()\n portrayal[\"y\"] = cell.get_col()\n portrayal[\"Color\"] = _COLORS[cell.get_state()]\n return portrayal\n\n\ncanvas_element = CanvasGrid(\n color_patch_draw, grid_rows, grid_cols, canvas_width, canvas_height\n)\n\nserver = ModularServer(\n ColorPatches,\n [canvas_element],\n \"Color Patches\",\n {\"width\": grid_rows, \"height\": grid_cols},\n)\n\n# webbrowser.open('http://127.0.0.1:8521') # TODO: make this configurable\n", "path": "examples/color_patches/color_patches/server.py"}]}
1,170
128
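The one-line fix above turns on a unit distinction that is easy to miss: `CanvasGrid` takes the grid size in cells and the canvas size in pixels, while the model itself should only ever see cells. A minimal arithmetic sketch of why the mix-up made the example appear to hang:

```python
# Grid size is measured in cells; canvas size in pixels.
grid_rows, grid_cols, cell_size = 50, 25, 10
canvas_width = grid_rows * cell_size   # 500 px
canvas_height = grid_cols * cell_size  # 250 px

# Passing pixel sizes as the model's width/height builds a 500x250 grid
# (125_000 cells) instead of the intended 50x25 grid (1_250 cells):
# a 100x blowup in cells to initialize, which is why reset took so long.
wrong = {"width": canvas_width, "height": canvas_height}
right = {"width": grid_rows, "height": grid_cols}
assert wrong["width"] * wrong["height"] == 100 * right["width"] * right["height"]
```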
gh_patches_debug_15770
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-1793
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Documentation references old propagators module `propagators.inject` is being used [here](https://github.com/open-telemetry/opentelemetry-python/blob/main/docs/examples/auto-instrumentation/client.py#L40), `propagators.extract` is being used [here](https://github.com/open-telemetry/opentelemetry-python/blame/main/docs/examples/auto-instrumentation/README.rst#L40). They should use `propagate` instead. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/examples/auto-instrumentation/client.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from sys import argv 16 17 from requests import get 18 19 from opentelemetry import propagators, trace 20 from opentelemetry.sdk.trace import TracerProvider 21 from opentelemetry.sdk.trace.export import ( 22 ConsoleSpanExporter, 23 SimpleSpanProcessor, 24 ) 25 26 trace.set_tracer_provider(TracerProvider()) 27 tracer = trace.get_tracer_provider().get_tracer(__name__) 28 29 trace.get_tracer_provider().add_span_processor( 30 SimpleSpanProcessor(ConsoleSpanExporter()) 31 ) 32 33 34 assert len(argv) == 2 35 36 with tracer.start_as_current_span("client"): 37 38 with tracer.start_as_current_span("client-server"): 39 headers = {} 40 propagators.inject(headers) 41 requested = get( 42 "http://localhost:8082/server_request", 43 params={"param": argv[1]}, 44 headers=headers, 45 ) 46 47 assert requested.status_code == 200 48 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/examples/auto-instrumentation/client.py b/docs/examples/auto-instrumentation/client.py --- a/docs/examples/auto-instrumentation/client.py +++ b/docs/examples/auto-instrumentation/client.py @@ -16,7 +16,8 @@ from requests import get -from opentelemetry import propagators, trace +from opentelemetry import trace +from opentelemetry.propagate import inject from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( ConsoleSpanExporter, @@ -37,7 +38,7 @@ with tracer.start_as_current_span("client-server"): headers = {} - propagators.inject(headers) + inject(headers) requested = get( "http://localhost:8082/server_request", params={"param": argv[1]},
{"golden_diff": "diff --git a/docs/examples/auto-instrumentation/client.py b/docs/examples/auto-instrumentation/client.py\n--- a/docs/examples/auto-instrumentation/client.py\n+++ b/docs/examples/auto-instrumentation/client.py\n@@ -16,7 +16,8 @@\n \n from requests import get\n \n-from opentelemetry import propagators, trace\n+from opentelemetry import trace\n+from opentelemetry.propagate import inject\n from opentelemetry.sdk.trace import TracerProvider\n from opentelemetry.sdk.trace.export import (\n ConsoleSpanExporter,\n@@ -37,7 +38,7 @@\n \n with tracer.start_as_current_span(\"client-server\"):\n headers = {}\n- propagators.inject(headers)\n+ inject(headers)\n requested = get(\n \"http://localhost:8082/server_request\",\n params={\"param\": argv[1]},\n", "issue": "Documentation references old propagators module\n`propagators.inject` is being used [here](https://github.com/open-telemetry/opentelemetry-python/blob/main/docs/examples/auto-instrumentation/client.py#L40), `propagators.extract` is being used [here](https://github.com/open-telemetry/opentelemetry-python/blame/main/docs/examples/auto-instrumentation/README.rst#L40). They should use `propagate` instead.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom sys import argv\n\nfrom requests import get\n\nfrom opentelemetry import propagators, trace\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import (\n ConsoleSpanExporter,\n SimpleSpanProcessor,\n)\n\ntrace.set_tracer_provider(TracerProvider())\ntracer = trace.get_tracer_provider().get_tracer(__name__)\n\ntrace.get_tracer_provider().add_span_processor(\n SimpleSpanProcessor(ConsoleSpanExporter())\n)\n\n\nassert len(argv) == 2\n\nwith tracer.start_as_current_span(\"client\"):\n\n with tracer.start_as_current_span(\"client-server\"):\n headers = {}\n propagators.inject(headers)\n requested = get(\n \"http://localhost:8082/server_request\",\n params={\"param\": argv[1]},\n headers=headers,\n )\n\n assert requested.status_code == 200\n", "path": "docs/examples/auto-instrumentation/client.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom sys import argv\n\nfrom requests import get\n\nfrom opentelemetry import trace\nfrom opentelemetry.propagate import inject\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import (\n ConsoleSpanExporter,\n 
SimpleSpanProcessor,\n)\n\ntrace.set_tracer_provider(TracerProvider())\ntracer = trace.get_tracer_provider().get_tracer(__name__)\n\ntrace.get_tracer_provider().add_span_processor(\n SimpleSpanProcessor(ConsoleSpanExporter())\n)\n\n\nassert len(argv) == 2\n\nwith tracer.start_as_current_span(\"client\"):\n\n with tracer.start_as_current_span(\"client-server\"):\n headers = {}\n inject(headers)\n requested = get(\n \"http://localhost:8082/server_request\",\n params={\"param\": argv[1]},\n headers=headers,\n )\n\n assert requested.status_code == 200\n", "path": "docs/examples/auto-instrumentation/client.py"}]}
758
182
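The docs fix tracks the rename of the convenience helpers from the old `opentelemetry.propagators` import to `opentelemetry.propagate`. A minimal sketch of the renamed entry point, assuming `opentelemetry-api` and `opentelemetry-sdk` 1.x are installed:

```python
from opentelemetry import trace
from opentelemetry.propagate import inject
from opentelemetry.sdk.trace import TracerProvider

trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer(__name__)

with tracer.start_as_current_span("client"):
    headers = {}
    inject(headers)  # fills the carrier, e.g. with a W3C `traceparent` key
    print(headers)
```

`extract` moved the same way, so the server-side examples referenced in the issue become `from opentelemetry.propagate import extract`.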
gh_patches_debug_17971
rasdani/github-patches
git_diff
googleapis__python-bigquery-630
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- The DB API Binary function should accept bytes. ``` (3.8) jim@ds9:~/p/g/python-bigquery-sqlalchemy$ python Python 3.8.5 (default, Jan 27 2021, 15:41:15) [GCC 9.3.0] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import google.cloud.bigquery.dbapi >>> google.cloud.bigquery.dbapi.Binary(b'x') Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/jim/p/g/python-bigquery/google/cloud/bigquery/dbapi/types.py", line 42, in Binary return string.encode("utf-8") AttributeError: 'bytes' object has no attribute 'encode' ``` Bytes are the most common way to represent binary data. Accepting strings, as it does now seems at best to be a convenience and at worst a bug magnet. In SQLAlchemy, if you defined a model that has a binary attribute, you'd store bytes data in it, but that would break for bigquery, di to this issue. Sqlite's `Binary` function requires bytes data. I propose to change the function to accept bytes. For the sake of backward compatibility, I propose to continue to accept strings. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `google/cloud/bigquery/dbapi/types.py` Content: ``` 1 # Copyright 2017 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Types used in the Google BigQuery DB-API. 16 17 See `PEP-249`_ for details. 18 19 .. _PEP-249: 20 https://www.python.org/dev/peps/pep-0249/#type-objects-and-constructors 21 """ 22 23 import datetime 24 25 26 Date = datetime.date 27 Time = datetime.time 28 Timestamp = datetime.datetime 29 DateFromTicks = datetime.date.fromtimestamp 30 TimestampFromTicks = datetime.datetime.fromtimestamp 31 32 33 def Binary(string): 34 """Contruct a DB-API binary value. 35 36 Args: 37 string (str): A string to encode as a binary value. 38 39 Returns: 40 bytes: The UTF-8 encoded bytes representing the string. 41 """ 42 return string.encode("utf-8") 43 44 45 def TimeFromTicks(ticks, tz=None): 46 """Construct a DB-API time value from the given ticks value. 47 48 Args: 49 ticks (float): 50 a number of seconds since the epoch; see the documentation of the 51 standard Python time module for details. 52 53 tz (datetime.tzinfo): (Optional) time zone to use for conversion 54 55 Returns: 56 datetime.time: time represented by ticks. 57 """ 58 dt = datetime.datetime.fromtimestamp(ticks, tz=tz) 59 return dt.timetz() 60 61 62 class _DBAPITypeObject(object): 63 """DB-API type object which compares equal to many different strings. 64 65 See `PEP-249`_ for details. 66 67 .. 
_PEP-249: 68 https://www.python.org/dev/peps/pep-0249/#implementation-hints-for-module-authors 69 """ 70 71 def __init__(self, *values): 72 self.values = values 73 74 def __eq__(self, other): 75 return other in self.values 76 77 78 STRING = "STRING" 79 BINARY = _DBAPITypeObject("BYTES", "RECORD", "STRUCT") 80 NUMBER = _DBAPITypeObject( 81 "INTEGER", "INT64", "FLOAT", "FLOAT64", "NUMERIC", "BIGNUMERIC", "BOOLEAN", "BOOL" 82 ) 83 DATETIME = _DBAPITypeObject("TIMESTAMP", "DATE", "TIME", "DATETIME") 84 ROWID = "ROWID" 85 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/google/cloud/bigquery/dbapi/types.py b/google/cloud/bigquery/dbapi/types.py --- a/google/cloud/bigquery/dbapi/types.py +++ b/google/cloud/bigquery/dbapi/types.py @@ -30,16 +30,28 @@ TimestampFromTicks = datetime.datetime.fromtimestamp -def Binary(string): +def Binary(data): """Contruct a DB-API binary value. Args: - string (str): A string to encode as a binary value. + data (bytes-like): An object containing binary data and that + can be converted to bytes with the `bytes` builtin. Returns: - bytes: The UTF-8 encoded bytes representing the string. + bytes: The binary data as a bytes object. """ - return string.encode("utf-8") + if isinstance(data, int): + # This is not the conversion we're looking for, because it + # will simply create a bytes object of the given size. + raise TypeError("cannot convert `int` object to binary") + + try: + return bytes(data) + except TypeError: + if isinstance(data, str): + return data.encode("utf-8") + else: + raise def TimeFromTicks(ticks, tz=None):
{"golden_diff": "diff --git a/google/cloud/bigquery/dbapi/types.py b/google/cloud/bigquery/dbapi/types.py\n--- a/google/cloud/bigquery/dbapi/types.py\n+++ b/google/cloud/bigquery/dbapi/types.py\n@@ -30,16 +30,28 @@\n TimestampFromTicks = datetime.datetime.fromtimestamp\n \n \n-def Binary(string):\n+def Binary(data):\n \"\"\"Contruct a DB-API binary value.\n \n Args:\n- string (str): A string to encode as a binary value.\n+ data (bytes-like): An object containing binary data and that\n+ can be converted to bytes with the `bytes` builtin.\n \n Returns:\n- bytes: The UTF-8 encoded bytes representing the string.\n+ bytes: The binary data as a bytes object.\n \"\"\"\n- return string.encode(\"utf-8\")\n+ if isinstance(data, int):\n+ # This is not the conversion we're looking for, because it\n+ # will simply create a bytes object of the given size.\n+ raise TypeError(\"cannot convert `int` object to binary\")\n+\n+ try:\n+ return bytes(data)\n+ except TypeError:\n+ if isinstance(data, str):\n+ return data.encode(\"utf-8\")\n+ else:\n+ raise\n \n \n def TimeFromTicks(ticks, tz=None):\n", "issue": "The DB API Binary function should accept bytes.\n```\r\n(3.8) jim@ds9:~/p/g/python-bigquery-sqlalchemy$ python\r\nPython 3.8.5 (default, Jan 27 2021, 15:41:15) \r\n[GCC 9.3.0] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import google.cloud.bigquery.dbapi\r\n>>> google.cloud.bigquery.dbapi.Binary(b'x')\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/jim/p/g/python-bigquery/google/cloud/bigquery/dbapi/types.py\", line 42, in Binary\r\n return string.encode(\"utf-8\")\r\nAttributeError: 'bytes' object has no attribute 'encode'\r\n```\r\nBytes are the most common way to represent binary data. Accepting strings, as it does now seems at best to be a convenience and at worst a bug magnet.\r\n\r\nIn SQLAlchemy, if you defined a model that has a binary attribute, you'd store bytes data in it, but that would break for bigquery, di to this issue.\r\n\r\nSqlite's `Binary` function requires bytes data.\r\n\r\nI propose to change the function to accept bytes. For the sake of backward compatibility, I propose to continue to accept strings.\r\n\n", "before_files": [{"content": "# Copyright 2017 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Types used in the Google BigQuery DB-API.\n\nSee `PEP-249`_ for details.\n\n.. 
_PEP-249:\n https://www.python.org/dev/peps/pep-0249/#type-objects-and-constructors\n\"\"\"\n\nimport datetime\n\n\nDate = datetime.date\nTime = datetime.time\nTimestamp = datetime.datetime\nDateFromTicks = datetime.date.fromtimestamp\nTimestampFromTicks = datetime.datetime.fromtimestamp\n\n\ndef Binary(string):\n \"\"\"Contruct a DB-API binary value.\n\n Args:\n string (str): A string to encode as a binary value.\n\n Returns:\n bytes: The UTF-8 encoded bytes representing the string.\n \"\"\"\n return string.encode(\"utf-8\")\n\n\ndef TimeFromTicks(ticks, tz=None):\n \"\"\"Construct a DB-API time value from the given ticks value.\n\n Args:\n ticks (float):\n a number of seconds since the epoch; see the documentation of the\n standard Python time module for details.\n\n tz (datetime.tzinfo): (Optional) time zone to use for conversion\n\n Returns:\n datetime.time: time represented by ticks.\n \"\"\"\n dt = datetime.datetime.fromtimestamp(ticks, tz=tz)\n return dt.timetz()\n\n\nclass _DBAPITypeObject(object):\n \"\"\"DB-API type object which compares equal to many different strings.\n\n See `PEP-249`_ for details.\n\n .. _PEP-249:\n https://www.python.org/dev/peps/pep-0249/#implementation-hints-for-module-authors\n \"\"\"\n\n def __init__(self, *values):\n self.values = values\n\n def __eq__(self, other):\n return other in self.values\n\n\nSTRING = \"STRING\"\nBINARY = _DBAPITypeObject(\"BYTES\", \"RECORD\", \"STRUCT\")\nNUMBER = _DBAPITypeObject(\n \"INTEGER\", \"INT64\", \"FLOAT\", \"FLOAT64\", \"NUMERIC\", \"BIGNUMERIC\", \"BOOLEAN\", \"BOOL\"\n)\nDATETIME = _DBAPITypeObject(\"TIMESTAMP\", \"DATE\", \"TIME\", \"DATETIME\")\nROWID = \"ROWID\"\n", "path": "google/cloud/bigquery/dbapi/types.py"}], "after_files": [{"content": "# Copyright 2017 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Types used in the Google BigQuery DB-API.\n\nSee `PEP-249`_ for details.\n\n.. 
_PEP-249:\n https://www.python.org/dev/peps/pep-0249/#type-objects-and-constructors\n\"\"\"\n\nimport datetime\n\n\nDate = datetime.date\nTime = datetime.time\nTimestamp = datetime.datetime\nDateFromTicks = datetime.date.fromtimestamp\nTimestampFromTicks = datetime.datetime.fromtimestamp\n\n\ndef Binary(data):\n \"\"\"Contruct a DB-API binary value.\n\n Args:\n data (bytes-like): An object containing binary data and that\n can be converted to bytes with the `bytes` builtin.\n\n Returns:\n bytes: The binary data as a bytes object.\n \"\"\"\n if isinstance(data, int):\n # This is not the conversion we're looking for, because it\n # will simply create a bytes object of the given size.\n raise TypeError(\"cannot convert `int` object to binary\")\n\n try:\n return bytes(data)\n except TypeError:\n if isinstance(data, str):\n return data.encode(\"utf-8\")\n else:\n raise\n\n\ndef TimeFromTicks(ticks, tz=None):\n \"\"\"Construct a DB-API time value from the given ticks value.\n\n Args:\n ticks (float):\n a number of seconds since the epoch; see the documentation of the\n standard Python time module for details.\n\n tz (datetime.tzinfo): (Optional) time zone to use for conversion\n\n Returns:\n datetime.time: time represented by ticks.\n \"\"\"\n dt = datetime.datetime.fromtimestamp(ticks, tz=tz)\n return dt.timetz()\n\n\nclass _DBAPITypeObject(object):\n \"\"\"DB-API type object which compares equal to many different strings.\n\n See `PEP-249`_ for details.\n\n .. _PEP-249:\n https://www.python.org/dev/peps/pep-0249/#implementation-hints-for-module-authors\n \"\"\"\n\n def __init__(self, *values):\n self.values = values\n\n def __eq__(self, other):\n return other in self.values\n\n\nSTRING = \"STRING\"\nBINARY = _DBAPITypeObject(\"BYTES\", \"RECORD\", \"STRUCT\")\nNUMBER = _DBAPITypeObject(\n \"INTEGER\", \"INT64\", \"FLOAT\", \"FLOAT64\", \"NUMERIC\", \"BIGNUMERIC\", \"BOOLEAN\", \"BOOL\"\n)\nDATETIME = _DBAPITypeObject(\"TIMESTAMP\", \"DATE\", \"TIME\", \"DATETIME\")\nROWID = \"ROWID\"\n", "path": "google/cloud/bigquery/dbapi/types.py"}]}
1,340
282
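A short check of the post-patch contract of `Binary`, exercising exactly the behavior the diff above introduces (requires `google-cloud-bigquery` with this fix applied):

```python
from google.cloud.bigquery.dbapi import Binary

assert Binary(b"x") == b"x"                # bytes pass through unchanged
assert Binary(bytearray(b"xy")) == b"xy"   # any bytes-like is accepted
assert Binary(memoryview(b"z")) == b"z"
assert Binary("x") == b"x"                 # str keeps the old UTF-8 behavior
try:
    Binary(42)  # ints are rejected: bytes(42) would silently allocate
except TypeError as exc:  # 42 zero bytes instead of encoding the value
    print(exc)  # "cannot convert `int` object to binary"
```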
gh_patches_debug_27461
rasdani/github-patches
git_diff
napari__napari-745
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Show logo during startup ## 🚀 Feature Right now napari takes a long time to launch the viewer. People can get discouraged as they wait, unsure if napari is loading, or if their data is loading, or if it is just hanging. Showing the napari logo, possibly with / without some text during that time might make it feel better. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `napari/_qt/event_loop.py` Content: ``` 1 import sys 2 from contextlib import contextmanager 3 4 from qtpy.QtWidgets import QApplication 5 6 7 @contextmanager 8 def gui_qt(): 9 """Start a Qt event loop in which to run the application. 10 11 Notes 12 ----- 13 This context manager is not needed if running napari within an interactive 14 IPython session. In this case, use the ``%gui qt`` magic command, or start 15 IPython with the Qt GUI event loop enabled by default by using 16 ``ipython --gui=qt``. 17 """ 18 app = QApplication.instance() or QApplication(sys.argv) 19 yield 20 app.exec_() 21 ``` Path: `napari/__main__.py` Content: ``` 1 """ 2 napari command line viewer. 3 """ 4 import argparse 5 import sys 6 7 import numpy as np 8 9 from .util import io 10 from . import Viewer, gui_qt 11 12 13 def main(): 14 parser = argparse.ArgumentParser(usage=__doc__) 15 parser.add_argument('images', nargs='*', help='Images to view.') 16 parser.add_argument( 17 '--layers', 18 action='store_true', 19 help='Treat multiple input images as layers.', 20 ) 21 parser.add_argument( 22 '-r', 23 '--rgb', 24 help='Treat images as RGB.', 25 action='store_true', 26 default=None, 27 ) 28 parser.add_argument( 29 '-g', 30 '--grayscale', 31 dest='rgb', 32 action='store_false', 33 help='interpret all dimensions in the image as spatial', 34 ) 35 parser.add_argument( 36 '-D', 37 '--use-dask', 38 action='store_true', 39 help='Use dask to read in images. This conserves memory. This option ' 40 'does nothing if a single image is given.', 41 default=None, 42 ) 43 parser.add_argument( 44 '-N', 45 '--use-numpy', 46 action='store_false', 47 dest='use_dask', 48 help='Use NumPy to read in images. This can be more performant than ' 49 'dask if all the images fit in RAM. This option does nothing if ' 50 'only a single image is given.', 51 ) 52 args = parser.parse_args() 53 with gui_qt(): 54 v = Viewer() 55 if len(args.images) > 0: 56 images = io.magic_imread( 57 args.images, use_dask=args.use_dask, stack=not args.layers 58 ) 59 if args.layers: 60 for layer in images: 61 if layer.dtype in ( 62 np.int32, 63 np.uint32, 64 np.int64, 65 np.uint64, 66 ): 67 v.add_labels(layer) 68 else: 69 v.add_image(layer, rgb=args.rgb) 70 else: 71 v.add_image(images, rgb=args.rgb) 72 73 74 if __name__ == '__main__': 75 sys.exit(main()) 76 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/napari/__main__.py b/napari/__main__.py --- a/napari/__main__.py +++ b/napari/__main__.py @@ -50,7 +50,7 @@ 'only a single image is given.', ) args = parser.parse_args() - with gui_qt(): + with gui_qt(startup_logo=True): v = Viewer() if len(args.images) > 0: images = io.magic_imread( diff --git a/napari/_qt/event_loop.py b/napari/_qt/event_loop.py --- a/napari/_qt/event_loop.py +++ b/napari/_qt/event_loop.py @@ -1,13 +1,20 @@ import sys from contextlib import contextmanager +from os.path import dirname, join -from qtpy.QtWidgets import QApplication +from qtpy.QtGui import QPixmap +from qtpy.QtWidgets import QApplication, QSplashScreen @contextmanager -def gui_qt(): +def gui_qt(*, startup_logo=False): """Start a Qt event loop in which to run the application. + Parameters + ---------- + startup_logo : bool + Show a splash screen with the napari logo during startup. + Notes ----- This context manager is not needed if running napari within an interactive @@ -16,5 +23,11 @@ ``ipython --gui=qt``. """ app = QApplication.instance() or QApplication(sys.argv) + if startup_logo: + logopath = join(dirname(__file__), '..', 'resources', 'logo.png') + splash_widget = QSplashScreen(QPixmap(logopath).scaled(400, 400)) + splash_widget.show() yield + if startup_logo: + splash_widget.close() app.exec_()
{"golden_diff": "diff --git a/napari/__main__.py b/napari/__main__.py\n--- a/napari/__main__.py\n+++ b/napari/__main__.py\n@@ -50,7 +50,7 @@\n 'only a single image is given.',\n )\n args = parser.parse_args()\n- with gui_qt():\n+ with gui_qt(startup_logo=True):\n v = Viewer()\n if len(args.images) > 0:\n images = io.magic_imread(\ndiff --git a/napari/_qt/event_loop.py b/napari/_qt/event_loop.py\n--- a/napari/_qt/event_loop.py\n+++ b/napari/_qt/event_loop.py\n@@ -1,13 +1,20 @@\n import sys\n from contextlib import contextmanager\n+from os.path import dirname, join\n \n-from qtpy.QtWidgets import QApplication\n+from qtpy.QtGui import QPixmap\n+from qtpy.QtWidgets import QApplication, QSplashScreen\n \n \n @contextmanager\n-def gui_qt():\n+def gui_qt(*, startup_logo=False):\n \"\"\"Start a Qt event loop in which to run the application.\n \n+ Parameters\n+ ----------\n+ startup_logo : bool\n+ Show a splash screen with the napari logo during startup.\n+\n Notes\n -----\n This context manager is not needed if running napari within an interactive\n@@ -16,5 +23,11 @@\n ``ipython --gui=qt``.\n \"\"\"\n app = QApplication.instance() or QApplication(sys.argv)\n+ if startup_logo:\n+ logopath = join(dirname(__file__), '..', 'resources', 'logo.png')\n+ splash_widget = QSplashScreen(QPixmap(logopath).scaled(400, 400))\n+ splash_widget.show()\n yield\n+ if startup_logo:\n+ splash_widget.close()\n app.exec_()\n", "issue": "Show logo during startup\n## \ud83d\ude80 Feature\r\nRight now napari takes a long time to launch the viewer. People can get discouraged as they wait, unsure if napari is loading, or if their data is loading, or if it is just hanging. Showing the napari logo, possibly with / without some text during that time might make it feel better.\n", "before_files": [{"content": "import sys\nfrom contextlib import contextmanager\n\nfrom qtpy.QtWidgets import QApplication\n\n\n@contextmanager\ndef gui_qt():\n \"\"\"Start a Qt event loop in which to run the application.\n\n Notes\n -----\n This context manager is not needed if running napari within an interactive\n IPython session. In this case, use the ``%gui qt`` magic command, or start\n IPython with the Qt GUI event loop enabled by default by using\n ``ipython --gui=qt``.\n \"\"\"\n app = QApplication.instance() or QApplication(sys.argv)\n yield\n app.exec_()\n", "path": "napari/_qt/event_loop.py"}, {"content": "\"\"\"\nnapari command line viewer.\n\"\"\"\nimport argparse\nimport sys\n\nimport numpy as np\n\nfrom .util import io\nfrom . import Viewer, gui_qt\n\n\ndef main():\n parser = argparse.ArgumentParser(usage=__doc__)\n parser.add_argument('images', nargs='*', help='Images to view.')\n parser.add_argument(\n '--layers',\n action='store_true',\n help='Treat multiple input images as layers.',\n )\n parser.add_argument(\n '-r',\n '--rgb',\n help='Treat images as RGB.',\n action='store_true',\n default=None,\n )\n parser.add_argument(\n '-g',\n '--grayscale',\n dest='rgb',\n action='store_false',\n help='interpret all dimensions in the image as spatial',\n )\n parser.add_argument(\n '-D',\n '--use-dask',\n action='store_true',\n help='Use dask to read in images. This conserves memory. This option '\n 'does nothing if a single image is given.',\n default=None,\n )\n parser.add_argument(\n '-N',\n '--use-numpy',\n action='store_false',\n dest='use_dask',\n help='Use NumPy to read in images. This can be more performant than '\n 'dask if all the images fit in RAM. 
This option does nothing if '\n 'only a single image is given.',\n )\n args = parser.parse_args()\n with gui_qt():\n v = Viewer()\n if len(args.images) > 0:\n images = io.magic_imread(\n args.images, use_dask=args.use_dask, stack=not args.layers\n )\n if args.layers:\n for layer in images:\n if layer.dtype in (\n np.int32,\n np.uint32,\n np.int64,\n np.uint64,\n ):\n v.add_labels(layer)\n else:\n v.add_image(layer, rgb=args.rgb)\n else:\n v.add_image(images, rgb=args.rgb)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "path": "napari/__main__.py"}], "after_files": [{"content": "import sys\nfrom contextlib import contextmanager\nfrom os.path import dirname, join\n\nfrom qtpy.QtGui import QPixmap\nfrom qtpy.QtWidgets import QApplication, QSplashScreen\n\n\n@contextmanager\ndef gui_qt(*, startup_logo=False):\n \"\"\"Start a Qt event loop in which to run the application.\n\n Parameters\n ----------\n startup_logo : bool\n Show a splash screen with the napari logo during startup.\n\n Notes\n -----\n This context manager is not needed if running napari within an interactive\n IPython session. In this case, use the ``%gui qt`` magic command, or start\n IPython with the Qt GUI event loop enabled by default by using\n ``ipython --gui=qt``.\n \"\"\"\n app = QApplication.instance() or QApplication(sys.argv)\n if startup_logo:\n logopath = join(dirname(__file__), '..', 'resources', 'logo.png')\n splash_widget = QSplashScreen(QPixmap(logopath).scaled(400, 400))\n splash_widget.show()\n yield\n if startup_logo:\n splash_widget.close()\n app.exec_()\n", "path": "napari/_qt/event_loop.py"}, {"content": "\"\"\"\nnapari command line viewer.\n\"\"\"\nimport argparse\nimport sys\n\nimport numpy as np\n\nfrom .util import io\nfrom . import Viewer, gui_qt\n\n\ndef main():\n parser = argparse.ArgumentParser(usage=__doc__)\n parser.add_argument('images', nargs='*', help='Images to view.')\n parser.add_argument(\n '--layers',\n action='store_true',\n help='Treat multiple input images as layers.',\n )\n parser.add_argument(\n '-r',\n '--rgb',\n help='Treat images as RGB.',\n action='store_true',\n default=None,\n )\n parser.add_argument(\n '-g',\n '--grayscale',\n dest='rgb',\n action='store_false',\n help='interpret all dimensions in the image as spatial',\n )\n parser.add_argument(\n '-D',\n '--use-dask',\n action='store_true',\n help='Use dask to read in images. This conserves memory. This option '\n 'does nothing if a single image is given.',\n default=None,\n )\n parser.add_argument(\n '-N',\n '--use-numpy',\n action='store_false',\n dest='use_dask',\n help='Use NumPy to read in images. This can be more performant than '\n 'dask if all the images fit in RAM. This option does nothing if '\n 'only a single image is given.',\n )\n args = parser.parse_args()\n with gui_qt(startup_logo=True):\n v = Viewer()\n if len(args.images) > 0:\n images = io.magic_imread(\n args.images, use_dask=args.use_dask, stack=not args.layers\n )\n if args.layers:\n for layer in images:\n if layer.dtype in (\n np.int32,\n np.uint32,\n np.int64,\n np.uint64,\n ):\n v.add_labels(layer)\n else:\n v.add_image(layer, rgb=args.rgb)\n else:\n v.add_image(images, rgb=args.rgb)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "path": "napari/__main__.py"}]}
1,108
413
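The patch gates a stock Qt splash screen behind a keyword-only flag so embedding contexts are unaffected. A generic sketch of the same pattern outside napari; the `logo.png` path is an assumption, and the `processEvents` call is an addition of this sketch, not part of the patch:

```python
import sys

from qtpy.QtGui import QPixmap
from qtpy.QtWidgets import QApplication, QSplashScreen

app = QApplication.instance() or QApplication(sys.argv)
splash = QSplashScreen(QPixmap("logo.png").scaled(400, 400))  # hypothetical logo
splash.show()
app.processEvents()  # force a paint so the logo is visible before the slow part

# ... expensive startup work (imports, viewer construction) would go here ...

splash.close()
```

Keeping the flag keyword-only (`gui_qt(*, startup_logo=False)`) means scripts and interactive sessions keep their old behavior unless they opt in, as `__main__.py` now does.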
gh_patches_debug_31614
rasdani/github-patches
git_diff
GeotrekCE__Geotrek-admin-1306
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- TRAIL filters Add "communes, secteurs, type physique, type foncier..." for TRAILS filters as it is in other modules. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `geotrek/land/filters.py` Content: ``` 1 from django.utils.translation import ugettext_lazy as _ 2 3 from mapentity.filters import MapEntityFilterSet 4 5 from geotrek.common.models import Organism 6 from geotrek.common.filters import StructureRelatedFilterSet 7 8 from geotrek.core.filters import TopologyFilter, PathFilterSet 9 from geotrek.infrastructure.filters import InfrastructureFilterSet, SignageFilterSet 10 from geotrek.maintenance.filters import InterventionFilterSet, ProjectFilterSet 11 from geotrek.trekking.filters import TrekFilterSet, POIFilterSet 12 from geotrek.zoning.filters import * # NOQA 13 14 from .models import ( 15 CompetenceEdge, LandEdge, LandType, PhysicalEdge, PhysicalType, 16 SignageManagementEdge, WorkManagementEdge, 17 ) 18 19 20 class PhysicalEdgeFilterSet(MapEntityFilterSet): 21 class Meta: 22 model = PhysicalEdge 23 fields = ['physical_type'] 24 25 26 class LandEdgeFilterSet(StructureRelatedFilterSet): 27 class Meta: 28 model = LandEdge 29 fields = ['land_type'] 30 31 32 class OrganismFilterSet(MapEntityFilterSet): 33 class Meta: 34 fields = ['organization'] 35 36 37 class CompetenceEdgeFilterSet(OrganismFilterSet): 38 class Meta(OrganismFilterSet.Meta): 39 model = CompetenceEdge 40 41 42 class WorkManagementEdgeFilterSet(OrganismFilterSet): 43 class Meta(OrganismFilterSet.Meta): 44 model = WorkManagementEdge 45 46 47 class SignageManagementEdgeFilterSet(OrganismFilterSet): 48 class Meta(OrganismFilterSet.Meta): 49 model = SignageManagementEdge 50 51 52 """ 53 54 Injected filter fields 55 56 """ 57 58 59 class TopologyFilterPhysicalType(TopologyFilter): 60 model = PhysicalType 61 62 def value_to_edges(self, value): 63 return value.physicaledge_set.all() 64 65 66 class TopologyFilterLandType(TopologyFilter): 67 model = LandType 68 69 def value_to_edges(self, value): 70 return value.landedge_set.all() 71 72 73 class TopologyFilterCompetenceEdge(TopologyFilter): 74 model = Organism 75 76 def value_to_edges(self, value): 77 return value.competenceedge_set.select_related('organization').all() 78 79 80 class TopologyFilterSignageManagementEdge(TopologyFilter): 81 model = Organism 82 83 def value_to_edges(self, value): 84 return value.signagemanagementedge_set.select_related('organization').all() 85 86 87 class TopologyFilterWorkManagementEdge(TopologyFilter): 88 model = Organism 89 90 def value_to_edges(self, value): 91 return value.workmanagementedge_set.select_related('organization').all() 92 93 94 def add_edge_filters(filter_set): 95 filter_set.add_filters({ 96 'physical_type': TopologyFilterPhysicalType(label=_('Physical type'), required=False), 97 'land_type': TopologyFilterLandType(label=_('Land type'), required=False), 98 'competence': TopologyFilterCompetenceEdge(label=_('Competence'), required=False), 99 'signage': TopologyFilterSignageManagementEdge(label=_('Signage management'), required=False), 100 'work': TopologyFilterWorkManagementEdge(label=_('Work management'), required=False), 101 }) 102 103 104 add_edge_filters(TrekFilterSet) 105 add_edge_filters(POIFilterSet) 106 add_edge_filters(InterventionFilterSet) 107 add_edge_filters(ProjectFilterSet) 108 add_edge_filters(PathFilterSet) 
109 add_edge_filters(InfrastructureFilterSet) 110 add_edge_filters(SignageFilterSet) 111 ``` Path: `geotrek/zoning/filters.py` Content: ``` 1 from django.utils.translation import ugettext_lazy as _ 2 3 from geotrek.core.filters import TopologyFilter, PathFilterSet 4 from geotrek.infrastructure.filters import InfrastructureFilterSet, SignageFilterSet 5 from geotrek.maintenance.filters import InterventionFilterSet, ProjectFilterSet 6 from geotrek.trekking.filters import TrekFilterSet, POIFilterSet 7 from geotrek.tourism.filters import TouristicContentFilterSet, TouristicEventFilterSet 8 from geotrek.zoning.models import City, District 9 10 11 class TopologyFilterCity(TopologyFilter): 12 model = City 13 14 def value_to_edges(self, value): 15 return value.cityedge_set.all() 16 17 18 class TopologyFilterDistrict(TopologyFilter): 19 model = District 20 21 def value_to_edges(self, value): 22 return value.districtedge_set.all() 23 24 25 def add_edge_filters(filter_set): 26 filter_set.add_filters({ 27 'city': TopologyFilterCity(label=_('City'), required=False), 28 'district': TopologyFilterDistrict(label=_('District'), required=False), 29 }) 30 31 32 add_edge_filters(TrekFilterSet) 33 add_edge_filters(POIFilterSet) 34 add_edge_filters(InterventionFilterSet) 35 add_edge_filters(ProjectFilterSet) 36 add_edge_filters(PathFilterSet) 37 add_edge_filters(InfrastructureFilterSet) 38 add_edge_filters(SignageFilterSet) 39 40 41 class IntersectionFilter(TopologyFilter): 42 """Inherit from ``TopologyFilter``, just to make sure the widgets 43 will be initialized the same way. 44 """ 45 def filter(self, qs, value): 46 if not value: 47 return qs 48 return qs.filter(geom__intersects=value.geom) 49 50 51 class IntersectionFilterCity(IntersectionFilter): 52 model = City 53 54 55 class IntersectionFilterDistrict(IntersectionFilter): 56 model = District 57 58 59 TouristicContentFilterSet.add_filters({ 60 'city': IntersectionFilterCity(label=_('City'), required=False), 61 'district': IntersectionFilterDistrict(label=_('District'), required=False), 62 }) 63 64 TouristicEventFilterSet.add_filters({ 65 'city': IntersectionFilterCity(label=_('City'), required=False), 66 'district': IntersectionFilterDistrict(label=_('District'), required=False), 67 }) 68 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/geotrek/land/filters.py b/geotrek/land/filters.py --- a/geotrek/land/filters.py +++ b/geotrek/land/filters.py @@ -5,7 +5,7 @@ from geotrek.common.models import Organism from geotrek.common.filters import StructureRelatedFilterSet -from geotrek.core.filters import TopologyFilter, PathFilterSet +from geotrek.core.filters import TopologyFilter, PathFilterSet, TrailFilterSet from geotrek.infrastructure.filters import InfrastructureFilterSet, SignageFilterSet from geotrek.maintenance.filters import InterventionFilterSet, ProjectFilterSet from geotrek.trekking.filters import TrekFilterSet, POIFilterSet @@ -108,3 +108,4 @@ add_edge_filters(PathFilterSet) add_edge_filters(InfrastructureFilterSet) add_edge_filters(SignageFilterSet) +add_edge_filters(TrailFilterSet) diff --git a/geotrek/zoning/filters.py b/geotrek/zoning/filters.py --- a/geotrek/zoning/filters.py +++ b/geotrek/zoning/filters.py @@ -1,6 +1,6 @@ from django.utils.translation import ugettext_lazy as _ -from geotrek.core.filters import TopologyFilter, PathFilterSet +from geotrek.core.filters import TopologyFilter, PathFilterSet, TrailFilterSet from geotrek.infrastructure.filters import InfrastructureFilterSet, SignageFilterSet from geotrek.maintenance.filters import InterventionFilterSet, ProjectFilterSet from geotrek.trekking.filters import TrekFilterSet, POIFilterSet @@ -36,6 +36,7 @@ add_edge_filters(PathFilterSet) add_edge_filters(InfrastructureFilterSet) add_edge_filters(SignageFilterSet) +add_edge_filters(TrailFilterSet) class IntersectionFilter(TopologyFilter):
{"golden_diff": "diff --git a/geotrek/land/filters.py b/geotrek/land/filters.py\n--- a/geotrek/land/filters.py\n+++ b/geotrek/land/filters.py\n@@ -5,7 +5,7 @@\n from geotrek.common.models import Organism\n from geotrek.common.filters import StructureRelatedFilterSet\n \n-from geotrek.core.filters import TopologyFilter, PathFilterSet\n+from geotrek.core.filters import TopologyFilter, PathFilterSet, TrailFilterSet\n from geotrek.infrastructure.filters import InfrastructureFilterSet, SignageFilterSet\n from geotrek.maintenance.filters import InterventionFilterSet, ProjectFilterSet\n from geotrek.trekking.filters import TrekFilterSet, POIFilterSet\n@@ -108,3 +108,4 @@\n add_edge_filters(PathFilterSet)\n add_edge_filters(InfrastructureFilterSet)\n add_edge_filters(SignageFilterSet)\n+add_edge_filters(TrailFilterSet)\ndiff --git a/geotrek/zoning/filters.py b/geotrek/zoning/filters.py\n--- a/geotrek/zoning/filters.py\n+++ b/geotrek/zoning/filters.py\n@@ -1,6 +1,6 @@\n from django.utils.translation import ugettext_lazy as _\n \n-from geotrek.core.filters import TopologyFilter, PathFilterSet\n+from geotrek.core.filters import TopologyFilter, PathFilterSet, TrailFilterSet\n from geotrek.infrastructure.filters import InfrastructureFilterSet, SignageFilterSet\n from geotrek.maintenance.filters import InterventionFilterSet, ProjectFilterSet\n from geotrek.trekking.filters import TrekFilterSet, POIFilterSet\n@@ -36,6 +36,7 @@\n add_edge_filters(PathFilterSet)\n add_edge_filters(InfrastructureFilterSet)\n add_edge_filters(SignageFilterSet)\n+add_edge_filters(TrailFilterSet)\n \n \n class IntersectionFilter(TopologyFilter):\n", "issue": "TRAIL filters\nAdd \"communes, secteurs, type physique, type foncier...\" for TRAILS filters as it is in other modules.\n\n", "before_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom mapentity.filters import MapEntityFilterSet\n\nfrom geotrek.common.models import Organism\nfrom geotrek.common.filters import StructureRelatedFilterSet\n\nfrom geotrek.core.filters import TopologyFilter, PathFilterSet\nfrom geotrek.infrastructure.filters import InfrastructureFilterSet, SignageFilterSet\nfrom geotrek.maintenance.filters import InterventionFilterSet, ProjectFilterSet\nfrom geotrek.trekking.filters import TrekFilterSet, POIFilterSet\nfrom geotrek.zoning.filters import * # NOQA\n\nfrom .models import (\n CompetenceEdge, LandEdge, LandType, PhysicalEdge, PhysicalType,\n SignageManagementEdge, WorkManagementEdge,\n)\n\n\nclass PhysicalEdgeFilterSet(MapEntityFilterSet):\n class Meta:\n model = PhysicalEdge\n fields = ['physical_type']\n\n\nclass LandEdgeFilterSet(StructureRelatedFilterSet):\n class Meta:\n model = LandEdge\n fields = ['land_type']\n\n\nclass OrganismFilterSet(MapEntityFilterSet):\n class Meta:\n fields = ['organization']\n\n\nclass CompetenceEdgeFilterSet(OrganismFilterSet):\n class Meta(OrganismFilterSet.Meta):\n model = CompetenceEdge\n\n\nclass WorkManagementEdgeFilterSet(OrganismFilterSet):\n class Meta(OrganismFilterSet.Meta):\n model = WorkManagementEdge\n\n\nclass SignageManagementEdgeFilterSet(OrganismFilterSet):\n class Meta(OrganismFilterSet.Meta):\n model = SignageManagementEdge\n\n\n\"\"\"\n\n Injected filter fields\n\n\"\"\"\n\n\nclass TopologyFilterPhysicalType(TopologyFilter):\n model = PhysicalType\n\n def value_to_edges(self, value):\n return value.physicaledge_set.all()\n\n\nclass TopologyFilterLandType(TopologyFilter):\n model = LandType\n\n def value_to_edges(self, value):\n return 
value.landedge_set.all()\n\n\nclass TopologyFilterCompetenceEdge(TopologyFilter):\n model = Organism\n\n def value_to_edges(self, value):\n return value.competenceedge_set.select_related('organization').all()\n\n\nclass TopologyFilterSignageManagementEdge(TopologyFilter):\n model = Organism\n\n def value_to_edges(self, value):\n return value.signagemanagementedge_set.select_related('organization').all()\n\n\nclass TopologyFilterWorkManagementEdge(TopologyFilter):\n model = Organism\n\n def value_to_edges(self, value):\n return value.workmanagementedge_set.select_related('organization').all()\n\n\ndef add_edge_filters(filter_set):\n filter_set.add_filters({\n 'physical_type': TopologyFilterPhysicalType(label=_('Physical type'), required=False),\n 'land_type': TopologyFilterLandType(label=_('Land type'), required=False),\n 'competence': TopologyFilterCompetenceEdge(label=_('Competence'), required=False),\n 'signage': TopologyFilterSignageManagementEdge(label=_('Signage management'), required=False),\n 'work': TopologyFilterWorkManagementEdge(label=_('Work management'), required=False),\n })\n\n\nadd_edge_filters(TrekFilterSet)\nadd_edge_filters(POIFilterSet)\nadd_edge_filters(InterventionFilterSet)\nadd_edge_filters(ProjectFilterSet)\nadd_edge_filters(PathFilterSet)\nadd_edge_filters(InfrastructureFilterSet)\nadd_edge_filters(SignageFilterSet)\n", "path": "geotrek/land/filters.py"}, {"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom geotrek.core.filters import TopologyFilter, PathFilterSet\nfrom geotrek.infrastructure.filters import InfrastructureFilterSet, SignageFilterSet\nfrom geotrek.maintenance.filters import InterventionFilterSet, ProjectFilterSet\nfrom geotrek.trekking.filters import TrekFilterSet, POIFilterSet\nfrom geotrek.tourism.filters import TouristicContentFilterSet, TouristicEventFilterSet\nfrom geotrek.zoning.models import City, District\n\n\nclass TopologyFilterCity(TopologyFilter):\n model = City\n\n def value_to_edges(self, value):\n return value.cityedge_set.all()\n\n\nclass TopologyFilterDistrict(TopologyFilter):\n model = District\n\n def value_to_edges(self, value):\n return value.districtedge_set.all()\n\n\ndef add_edge_filters(filter_set):\n filter_set.add_filters({\n 'city': TopologyFilterCity(label=_('City'), required=False),\n 'district': TopologyFilterDistrict(label=_('District'), required=False),\n })\n\n\nadd_edge_filters(TrekFilterSet)\nadd_edge_filters(POIFilterSet)\nadd_edge_filters(InterventionFilterSet)\nadd_edge_filters(ProjectFilterSet)\nadd_edge_filters(PathFilterSet)\nadd_edge_filters(InfrastructureFilterSet)\nadd_edge_filters(SignageFilterSet)\n\n\nclass IntersectionFilter(TopologyFilter):\n \"\"\"Inherit from ``TopologyFilter``, just to make sure the widgets\n will be initialized the same way.\n \"\"\"\n def filter(self, qs, value):\n if not value:\n return qs\n return qs.filter(geom__intersects=value.geom)\n\n\nclass IntersectionFilterCity(IntersectionFilter):\n model = City\n\n\nclass IntersectionFilterDistrict(IntersectionFilter):\n model = District\n\n\nTouristicContentFilterSet.add_filters({\n 'city': IntersectionFilterCity(label=_('City'), required=False),\n 'district': IntersectionFilterDistrict(label=_('District'), required=False),\n})\n\nTouristicEventFilterSet.add_filters({\n 'city': IntersectionFilterCity(label=_('City'), required=False),\n 'district': IntersectionFilterDistrict(label=_('District'), required=False),\n})\n", "path": "geotrek/zoning/filters.py"}], "after_files": [{"content": "from 
django.utils.translation import ugettext_lazy as _\n\nfrom mapentity.filters import MapEntityFilterSet\n\nfrom geotrek.common.models import Organism\nfrom geotrek.common.filters import StructureRelatedFilterSet\n\nfrom geotrek.core.filters import TopologyFilter, PathFilterSet, TrailFilterSet\nfrom geotrek.infrastructure.filters import InfrastructureFilterSet, SignageFilterSet\nfrom geotrek.maintenance.filters import InterventionFilterSet, ProjectFilterSet\nfrom geotrek.trekking.filters import TrekFilterSet, POIFilterSet\nfrom geotrek.zoning.filters import * # NOQA\n\nfrom .models import (\n CompetenceEdge, LandEdge, LandType, PhysicalEdge, PhysicalType,\n SignageManagementEdge, WorkManagementEdge,\n)\n\n\nclass PhysicalEdgeFilterSet(MapEntityFilterSet):\n class Meta:\n model = PhysicalEdge\n fields = ['physical_type']\n\n\nclass LandEdgeFilterSet(StructureRelatedFilterSet):\n class Meta:\n model = LandEdge\n fields = ['land_type']\n\n\nclass OrganismFilterSet(MapEntityFilterSet):\n class Meta:\n fields = ['organization']\n\n\nclass CompetenceEdgeFilterSet(OrganismFilterSet):\n class Meta(OrganismFilterSet.Meta):\n model = CompetenceEdge\n\n\nclass WorkManagementEdgeFilterSet(OrganismFilterSet):\n class Meta(OrganismFilterSet.Meta):\n model = WorkManagementEdge\n\n\nclass SignageManagementEdgeFilterSet(OrganismFilterSet):\n class Meta(OrganismFilterSet.Meta):\n model = SignageManagementEdge\n\n\n\"\"\"\n\n Injected filter fields\n\n\"\"\"\n\n\nclass TopologyFilterPhysicalType(TopologyFilter):\n model = PhysicalType\n\n def value_to_edges(self, value):\n return value.physicaledge_set.all()\n\n\nclass TopologyFilterLandType(TopologyFilter):\n model = LandType\n\n def value_to_edges(self, value):\n return value.landedge_set.all()\n\n\nclass TopologyFilterCompetenceEdge(TopologyFilter):\n model = Organism\n\n def value_to_edges(self, value):\n return value.competenceedge_set.select_related('organization').all()\n\n\nclass TopologyFilterSignageManagementEdge(TopologyFilter):\n model = Organism\n\n def value_to_edges(self, value):\n return value.signagemanagementedge_set.select_related('organization').all()\n\n\nclass TopologyFilterWorkManagementEdge(TopologyFilter):\n model = Organism\n\n def value_to_edges(self, value):\n return value.workmanagementedge_set.select_related('organization').all()\n\n\ndef add_edge_filters(filter_set):\n filter_set.add_filters({\n 'physical_type': TopologyFilterPhysicalType(label=_('Physical type'), required=False),\n 'land_type': TopologyFilterLandType(label=_('Land type'), required=False),\n 'competence': TopologyFilterCompetenceEdge(label=_('Competence'), required=False),\n 'signage': TopologyFilterSignageManagementEdge(label=_('Signage management'), required=False),\n 'work': TopologyFilterWorkManagementEdge(label=_('Work management'), required=False),\n })\n\n\nadd_edge_filters(TrekFilterSet)\nadd_edge_filters(POIFilterSet)\nadd_edge_filters(InterventionFilterSet)\nadd_edge_filters(ProjectFilterSet)\nadd_edge_filters(PathFilterSet)\nadd_edge_filters(InfrastructureFilterSet)\nadd_edge_filters(SignageFilterSet)\nadd_edge_filters(TrailFilterSet)\n", "path": "geotrek/land/filters.py"}, {"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom geotrek.core.filters import TopologyFilter, PathFilterSet, TrailFilterSet\nfrom geotrek.infrastructure.filters import InfrastructureFilterSet, SignageFilterSet\nfrom geotrek.maintenance.filters import InterventionFilterSet, ProjectFilterSet\nfrom geotrek.trekking.filters import TrekFilterSet, 
POIFilterSet\nfrom geotrek.tourism.filters import TouristicContentFilterSet, TouristicEventFilterSet\nfrom geotrek.zoning.models import City, District\n\n\nclass TopologyFilterCity(TopologyFilter):\n model = City\n\n def value_to_edges(self, value):\n return value.cityedge_set.all()\n\n\nclass TopologyFilterDistrict(TopologyFilter):\n model = District\n\n def value_to_edges(self, value):\n return value.districtedge_set.all()\n\n\ndef add_edge_filters(filter_set):\n filter_set.add_filters({\n 'city': TopologyFilterCity(label=_('City'), required=False),\n 'district': TopologyFilterDistrict(label=_('District'), required=False),\n })\n\n\nadd_edge_filters(TrekFilterSet)\nadd_edge_filters(POIFilterSet)\nadd_edge_filters(InterventionFilterSet)\nadd_edge_filters(ProjectFilterSet)\nadd_edge_filters(PathFilterSet)\nadd_edge_filters(InfrastructureFilterSet)\nadd_edge_filters(SignageFilterSet)\nadd_edge_filters(TrailFilterSet)\n\n\nclass IntersectionFilter(TopologyFilter):\n \"\"\"Inherit from ``TopologyFilter``, just to make sure the widgets\n will be initialized the same way.\n \"\"\"\n def filter(self, qs, value):\n if not value:\n return qs\n return qs.filter(geom__intersects=value.geom)\n\n\nclass IntersectionFilterCity(IntersectionFilter):\n model = City\n\n\nclass IntersectionFilterDistrict(IntersectionFilter):\n model = District\n\n\nTouristicContentFilterSet.add_filters({\n 'city': IntersectionFilterCity(label=_('City'), required=False),\n 'district': IntersectionFilterDistrict(label=_('District'), required=False),\n})\n\nTouristicEventFilterSet.add_filters({\n 'city': IntersectionFilterCity(label=_('City'), required=False),\n 'district': IntersectionFilterDistrict(label=_('District'), required=False),\n})\n", "path": "geotrek/zoning/filters.py"}]}
1,852
414
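The geotrek fix above is pure wiring — the land/zoning modules already inject their extra filters into other apps' FilterSets through an `add_filters` hook, and the patch only imports `TrailFilterSet` and appends it to each call list. A toy sketch of that injection pattern (plain Python; the string values and this `add_filters` classmethod are stand-ins for the real mapentity/django-filter machinery, not its API):

```python
class FilterSet:
    """Toy stand-in for a mapentity/django-filter FilterSet; only the
    add_filters hook exercised by the patch is modeled here."""
    base_filters = {}

    @classmethod
    def add_filters(cls, filters):
        # Copy-on-write so each subclass keeps its own filter dict.
        cls.base_filters = {**cls.base_filters, **filters}


class TrailFilterSet(FilterSet):
    base_filters = {"name": "CharFilter()"}


def add_edge_filters(filter_set):
    # Same shape as the patch: inject land filters into any module's
    # FilterSet after its definition, instead of subclassing it.
    filter_set.add_filters({
        "physical_type": "TopologyFilterPhysicalType()",
        "land_type": "TopologyFilterLandType()",
    })


add_edge_filters(TrailFilterSet)
assert set(TrailFilterSet.base_filters) == {"name", "physical_type", "land_type"}
```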
gh_patches_debug_47458
rasdani/github-patches
git_diff
awslabs__gluonts-1132
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update pandas dependency As documented in #967, pandas will be fixing the breaking change that led us to fix the dependency to `<1.1`, see pandas-dev/pandas#37267 Once that is resolved, we could remove the constraint. *Edit:* we should also make sure to find a solution to #965 first --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/gluonts/time_feature/_base.py` Content: ``` 1 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"). 4 # You may not use this file except in compliance with the License. 5 # A copy of the License is located at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # or in the "license" file accompanying this file. This file is distributed 10 # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 11 # express or implied. See the License for the specific language governing 12 # permissions and limitations under the License. 13 14 from typing import List 15 16 # Third-party imports 17 import numpy as np 18 import pandas as pd 19 from pandas.tseries import offsets 20 from pandas.tseries.frequencies import to_offset 21 22 # First-party imports 23 from gluonts.core.component import validated 24 25 26 class TimeFeature: 27 """ 28 Base class for features that only depend on time. 29 """ 30 31 @validated() 32 def __init__(self, normalized: bool = True): 33 self.normalized = normalized 34 35 def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: 36 pass 37 38 def __repr__(self): 39 return self.__class__.__name__ + "()" 40 41 42 class MinuteOfHour(TimeFeature): 43 """ 44 Minute of hour encoded as value between [-0.5, 0.5] 45 """ 46 47 def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: 48 if self.normalized: 49 return index.minute / 59.0 - 0.5 50 else: 51 return index.minute.map(float) 52 53 54 class HourOfDay(TimeFeature): 55 """ 56 Hour of day encoded as value between [-0.5, 0.5] 57 """ 58 59 def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: 60 if self.normalized: 61 return index.hour / 23.0 - 0.5 62 else: 63 return index.hour.map(float) 64 65 66 class DayOfWeek(TimeFeature): 67 """ 68 Hour of day encoded as value between [-0.5, 0.5] 69 """ 70 71 def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: 72 if self.normalized: 73 return index.dayofweek / 6.0 - 0.5 74 else: 75 return index.dayofweek.map(float) 76 77 78 class DayOfMonth(TimeFeature): 79 """ 80 Day of month encoded as value between [-0.5, 0.5] 81 """ 82 83 def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: 84 if self.normalized: 85 return index.day / 30.0 - 0.5 86 else: 87 return index.day.map(float) 88 89 90 class DayOfYear(TimeFeature): 91 """ 92 Day of year encoded as value between [-0.5, 0.5] 93 """ 94 95 def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: 96 if self.normalized: 97 return index.dayofyear / 364.0 - 0.5 98 else: 99 return index.dayofyear.map(float) 100 101 102 class MonthOfYear(TimeFeature): 103 """ 104 Month of year encoded as value between [-0.5, 0.5] 105 """ 106 107 def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: 108 if self.normalized: 109 return index.month / 11.0 - 0.5 110 else: 111 return index.month.map(float) 112 113 114 class WeekOfYear(TimeFeature): 115 """ 116 Week of year encoded as value between [-0.5, 0.5] 
117 """ 118 119 def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: 120 if self.normalized: 121 return index.weekofyear / 51.0 - 0.5 122 else: 123 return index.weekofyear.map(float) 124 125 126 def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]: 127 """ 128 Returns a list of time features that will be appropriate for the given frequency string. 129 130 Parameters 131 ---------- 132 133 freq_str 134 Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc. 135 136 """ 137 138 features_by_offsets = { 139 offsets.YearOffset: [], 140 offsets.MonthOffset: [MonthOfYear], 141 offsets.Week: [DayOfMonth, WeekOfYear], 142 offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear], 143 offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear], 144 offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear], 145 offsets.Minute: [ 146 MinuteOfHour, 147 HourOfDay, 148 DayOfWeek, 149 DayOfMonth, 150 DayOfYear, 151 ], 152 } 153 154 offset = to_offset(freq_str) 155 156 for offset_type, feature_classes in features_by_offsets.items(): 157 if isinstance(offset, offset_type): 158 return [cls() for cls in feature_classes] 159 160 supported_freq_msg = f""" 161 Unsupported frequency {freq_str} 162 163 The following frequencies are supported: 164 165 Y - yearly 166 alias: A 167 M - monthly 168 W - weekly 169 D - daily 170 B - business days 171 H - hourly 172 T - minutely 173 alias: min 174 """ 175 raise RuntimeError(supported_freq_msg) 176 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/gluonts/time_feature/_base.py b/src/gluonts/time_feature/_base.py --- a/src/gluonts/time_feature/_base.py +++ b/src/gluonts/time_feature/_base.py @@ -136,8 +136,8 @@ """ features_by_offsets = { - offsets.YearOffset: [], - offsets.MonthOffset: [MonthOfYear], + offsets.YearEnd: [], + offsets.MonthEnd: [MonthOfYear], offsets.Week: [DayOfMonth, WeekOfYear], offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear], offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],
{"golden_diff": "diff --git a/src/gluonts/time_feature/_base.py b/src/gluonts/time_feature/_base.py\n--- a/src/gluonts/time_feature/_base.py\n+++ b/src/gluonts/time_feature/_base.py\n@@ -136,8 +136,8 @@\n \"\"\"\n \n features_by_offsets = {\n- offsets.YearOffset: [],\n- offsets.MonthOffset: [MonthOfYear],\n+ offsets.YearEnd: [],\n+ offsets.MonthEnd: [MonthOfYear],\n offsets.Week: [DayOfMonth, WeekOfYear],\n offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],\n offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],\n", "issue": "Update pandas dependency\nAs documented in #967, pandas will be fixing the breaking change that led us to fix the dependency to `<1.1`, see pandas-dev/pandas#37267\r\n\r\nOnce that is resolved, we could remove the constraint.\r\n\r\n*Edit:* we should also make sure to find a solution to #965 first\n", "before_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom typing import List\n\n# Third-party imports\nimport numpy as np\nimport pandas as pd\nfrom pandas.tseries import offsets\nfrom pandas.tseries.frequencies import to_offset\n\n# First-party imports\nfrom gluonts.core.component import validated\n\n\nclass TimeFeature:\n \"\"\"\n Base class for features that only depend on time.\n \"\"\"\n\n @validated()\n def __init__(self, normalized: bool = True):\n self.normalized = normalized\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n pass\n\n def __repr__(self):\n return self.__class__.__name__ + \"()\"\n\n\nclass MinuteOfHour(TimeFeature):\n \"\"\"\n Minute of hour encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.minute / 59.0 - 0.5\n else:\n return index.minute.map(float)\n\n\nclass HourOfDay(TimeFeature):\n \"\"\"\n Hour of day encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.hour / 23.0 - 0.5\n else:\n return index.hour.map(float)\n\n\nclass DayOfWeek(TimeFeature):\n \"\"\"\n Hour of day encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.dayofweek / 6.0 - 0.5\n else:\n return index.dayofweek.map(float)\n\n\nclass DayOfMonth(TimeFeature):\n \"\"\"\n Day of month encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.day / 30.0 - 0.5\n else:\n return index.day.map(float)\n\n\nclass DayOfYear(TimeFeature):\n \"\"\"\n Day of year encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.dayofyear / 364.0 - 0.5\n else:\n return index.dayofyear.map(float)\n\n\nclass MonthOfYear(TimeFeature):\n \"\"\"\n Month of year encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if 
self.normalized:\n return index.month / 11.0 - 0.5\n else:\n return index.month.map(float)\n\n\nclass WeekOfYear(TimeFeature):\n \"\"\"\n Week of year encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.weekofyear / 51.0 - 0.5\n else:\n return index.weekofyear.map(float)\n\n\ndef time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:\n \"\"\"\n Returns a list of time features that will be appropriate for the given frequency string.\n\n Parameters\n ----------\n\n freq_str\n Frequency string of the form [multiple][granularity] such as \"12H\", \"5min\", \"1D\" etc.\n\n \"\"\"\n\n features_by_offsets = {\n offsets.YearOffset: [],\n offsets.MonthOffset: [MonthOfYear],\n offsets.Week: [DayOfMonth, WeekOfYear],\n offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],\n offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],\n offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],\n offsets.Minute: [\n MinuteOfHour,\n HourOfDay,\n DayOfWeek,\n DayOfMonth,\n DayOfYear,\n ],\n }\n\n offset = to_offset(freq_str)\n\n for offset_type, feature_classes in features_by_offsets.items():\n if isinstance(offset, offset_type):\n return [cls() for cls in feature_classes]\n\n supported_freq_msg = f\"\"\"\n Unsupported frequency {freq_str}\n\n The following frequencies are supported:\n\n Y - yearly\n alias: A\n M - monthly\n W - weekly\n D - daily\n B - business days\n H - hourly\n T - minutely\n alias: min\n \"\"\"\n raise RuntimeError(supported_freq_msg)\n", "path": "src/gluonts/time_feature/_base.py"}], "after_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom typing import List\n\n# Third-party imports\nimport numpy as np\nimport pandas as pd\nfrom pandas.tseries import offsets\nfrom pandas.tseries.frequencies import to_offset\n\n# First-party imports\nfrom gluonts.core.component import validated\n\n\nclass TimeFeature:\n \"\"\"\n Base class for features that only depend on time.\n \"\"\"\n\n @validated()\n def __init__(self, normalized: bool = True):\n self.normalized = normalized\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n pass\n\n def __repr__(self):\n return self.__class__.__name__ + \"()\"\n\n\nclass MinuteOfHour(TimeFeature):\n \"\"\"\n Minute of hour encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.minute / 59.0 - 0.5\n else:\n return index.minute.map(float)\n\n\nclass HourOfDay(TimeFeature):\n \"\"\"\n Hour of day encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.hour / 23.0 - 0.5\n else:\n return index.hour.map(float)\n\n\nclass DayOfWeek(TimeFeature):\n \"\"\"\n Hour of day encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.dayofweek / 6.0 - 0.5\n else:\n return index.dayofweek.map(float)\n\n\nclass DayOfMonth(TimeFeature):\n \"\"\"\n Day of month encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.day / 30.0 - 0.5\n else:\n return index.day.map(float)\n\n\nclass DayOfYear(TimeFeature):\n \"\"\"\n Day of year encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.dayofyear / 364.0 - 0.5\n else:\n return index.dayofyear.map(float)\n\n\nclass MonthOfYear(TimeFeature):\n \"\"\"\n Month of year encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.month / 11.0 - 0.5\n else:\n return index.month.map(float)\n\n\nclass WeekOfYear(TimeFeature):\n \"\"\"\n Week of year encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.weekofyear / 51.0 - 0.5\n else:\n return index.weekofyear.map(float)\n\n\ndef time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:\n \"\"\"\n Returns a list of time features that will be appropriate for the given frequency string.\n\n Parameters\n ----------\n\n freq_str\n Frequency string of the form [multiple][granularity] such as \"12H\", \"5min\", \"1D\" etc.\n\n \"\"\"\n\n features_by_offsets = {\n offsets.YearEnd: [],\n offsets.MonthEnd: [MonthOfYear],\n offsets.Week: [DayOfMonth, WeekOfYear],\n offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],\n offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],\n offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],\n offsets.Minute: [\n MinuteOfHour,\n HourOfDay,\n DayOfWeek,\n DayOfMonth,\n DayOfYear,\n ],\n }\n\n offset = to_offset(freq_str)\n\n for offset_type, feature_classes in features_by_offsets.items():\n if isinstance(offset, offset_type):\n return [cls() for cls in feature_classes]\n\n supported_freq_msg = f\"\"\"\n Unsupported frequency {freq_str}\n\n The following 
frequencies are supported:\n\n Y - yearly\n alias: A\n M - monthly\n W - weekly\n D - daily\n B - business days\n H - hourly\n T - minutely\n alias: min\n \"\"\"\n raise RuntimeError(supported_freq_msg)\n", "path": "src/gluonts/time_feature/_base.py"}]}
1,928
150
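Because this record reproduces the whole `_base.py` module, the gluonts fix is easy to smoke-test: on pandas >= 1.1 the pre-patch lookup table fails with `AttributeError` while the dict literal is being built (`pandas.tseries.offsets` no longer exposes `YearOffset`/`MonthOffset`), whereas the renamed `YearEnd`/`MonthEnd` keys exist on both old and new pandas. A quick check, assuming gluonts re-exports the function from the `gluonts.time_feature` package:

```python
import pandas as pd
from gluonts.time_feature import time_features_from_frequency_str

features = time_features_from_frequency_str("1H")
print([type(f).__name__ for f in features])
# -> ['HourOfDay', 'DayOfWeek', 'DayOfMonth', 'DayOfYear']

# Each feature maps a DatetimeIndex to values normalized into [-0.5, 0.5].
index = pd.date_range("2020-01-01", periods=48, freq="1H")
values = [feat(index) for feat in features]
assert all(len(v) == len(index) for v in values)
```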
gh_patches_debug_12149
rasdani/github-patches
git_diff
mindsdb__mindsdb-813
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add endpoint for mindsdb version Add HTTP route to get a version of mindsdb: /util/version/ That should return as mindsb version in JSON view: ``` { "mindsdb": "2.11.2" } ``` The new endpoint needs to be added in [Utils](https://github.com/mindsdb/mindsdb/blob/staging/mindsdb/api/http/namespaces/util.py). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mindsdb/api/http/namespaces/util.py` Content: ``` 1 from flask import request 2 from flask_restx import Resource, abort 3 4 from mindsdb.api.http.namespaces.configs.util import ns_conf 5 6 7 @ns_conf.route('/ping') 8 class Ping(Resource): 9 @ns_conf.doc('get_ping') 10 def get(self): 11 '''Checks server avaliable''' 12 return {'status': 'ok'} 13 14 15 @ns_conf.route('/shutdown') 16 class Shutdown(Resource): 17 @ns_conf.doc('get_shutdown') 18 def get(self): 19 '''Shutdown server''' 20 if request.host.startswith('127.0.0.1') or request.host.startswith('localhost'): 21 func = request.environ.get('werkzeug.server.shutdown') 22 if func is None: 23 return '', 500 24 func() 25 return '', 200 26 abort(403, "") 27 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mindsdb/api/http/namespaces/util.py b/mindsdb/api/http/namespaces/util.py --- a/mindsdb/api/http/namespaces/util.py +++ b/mindsdb/api/http/namespaces/util.py @@ -2,7 +2,7 @@ from flask_restx import Resource, abort from mindsdb.api.http.namespaces.configs.util import ns_conf - +from mindsdb import __about__ @ns_conf.route('/ping') class Ping(Resource): @@ -24,3 +24,12 @@ func() return '', 200 abort(403, "") + + +@ns_conf.route('/util/version') +class Version(Resource): + @ns_conf.doc('get_endpoint') + def get(self): + '''Check endpoint''' + return {'mindsdb': "{__about__.__version__}"} +
{"golden_diff": "diff --git a/mindsdb/api/http/namespaces/util.py b/mindsdb/api/http/namespaces/util.py\n--- a/mindsdb/api/http/namespaces/util.py\n+++ b/mindsdb/api/http/namespaces/util.py\n@@ -2,7 +2,7 @@\n from flask_restx import Resource, abort\n \n from mindsdb.api.http.namespaces.configs.util import ns_conf\n-\n+from mindsdb import __about__\n \n @ns_conf.route('/ping')\n class Ping(Resource):\n@@ -24,3 +24,12 @@\n func()\n return '', 200\n abort(403, \"\")\n+\n+ \n+@ns_conf.route('/util/version')\n+class Version(Resource):\n+ @ns_conf.doc('get_endpoint')\n+ def get(self):\n+ '''Check endpoint'''\n+ return {'mindsdb': \"{__about__.__version__}\"}\n+\n", "issue": "Add endpoint for mindsdb version\nAdd HTTP route to get a version of mindsdb: /util/version/\r\nThat should return as mindsb version in JSON view:\r\n```\r\n{\r\n \"mindsdb\": \"2.11.2\"\r\n}\r\n```\r\nThe new endpoint needs to be added in [Utils](https://github.com/mindsdb/mindsdb/blob/staging/mindsdb/api/http/namespaces/util.py).\n", "before_files": [{"content": "from flask import request\nfrom flask_restx import Resource, abort\n\nfrom mindsdb.api.http.namespaces.configs.util import ns_conf\n\n\n@ns_conf.route('/ping')\nclass Ping(Resource):\n @ns_conf.doc('get_ping')\n def get(self):\n '''Checks server avaliable'''\n return {'status': 'ok'}\n\n\n@ns_conf.route('/shutdown')\nclass Shutdown(Resource):\n @ns_conf.doc('get_shutdown')\n def get(self):\n '''Shutdown server'''\n if request.host.startswith('127.0.0.1') or request.host.startswith('localhost'):\n func = request.environ.get('werkzeug.server.shutdown')\n if func is None:\n return '', 500\n func()\n return '', 200\n abort(403, \"\")\n", "path": "mindsdb/api/http/namespaces/util.py"}], "after_files": [{"content": "from flask import request\nfrom flask_restx import Resource, abort\n\nfrom mindsdb.api.http.namespaces.configs.util import ns_conf\nfrom mindsdb import __about__\n\n@ns_conf.route('/ping')\nclass Ping(Resource):\n @ns_conf.doc('get_ping')\n def get(self):\n '''Checks server avaliable'''\n return {'status': 'ok'}\n\n\n@ns_conf.route('/shutdown')\nclass Shutdown(Resource):\n @ns_conf.doc('get_shutdown')\n def get(self):\n '''Shutdown server'''\n if request.host.startswith('127.0.0.1') or request.host.startswith('localhost'):\n func = request.environ.get('werkzeug.server.shutdown')\n if func is None:\n return '', 500\n func()\n return '', 200\n abort(403, \"\")\n\n \n@ns_conf.route('/util/version')\nclass Version(Resource):\n @ns_conf.doc('get_endpoint')\n def get(self):\n '''Check endpoint'''\n return {'mindsdb': \"{__about__.__version__}\"}\n \n", "path": "mindsdb/api/http/namespaces/util.py"}]}
568
194
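One wrinkle in the golden diff above: `"{__about__.__version__}"` is a plain string, not an f-string, so the endpoint returns the literal text `{__about__.__version__}` rather than a version number like `2.11.2`. A corrected sketch of the handler — route, decorator, and docstring copied from the record, with only the return value changed:

```python
from flask_restx import Resource

from mindsdb.api.http.namespaces.configs.util import ns_conf
from mindsdb import __about__


@ns_conf.route('/util/version')
class Version(Resource):
    @ns_conf.doc('get_endpoint')
    def get(self):
        '''Check endpoint'''
        # Direct attribute access yields e.g. {"mindsdb": "2.11.2"},
        # the JSON shape the issue asks for.
        return {'mindsdb': __about__.__version__}
```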
gh_patches_debug_11524
rasdani/github-patches
git_diff
Cog-Creators__Red-DiscordBot-1374
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [V3/Config] Clearing a registered data group when no data has been set results in a KeyError. Please be sure to read through other issues as well to make sure what you are suggesting/reporting has not already been suggested/reported ### Type: - [ ] Suggestion - [x] Bug ### Brief description of the problem When attempting to use methods such as `.clear_all_members` and `.clear_all_globals`, when no data has been set, will result in a KeyError being raised. ### Expected behavior Silently passes ### Actual behavior Key Error is raised. ### Steps to reproduce 1. register some data 2. try to clear all data from that scope 3. Key error is raised 4. You cry --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `redbot/core/drivers/red_json.py` Content: ``` 1 from pathlib import Path 2 from typing import Tuple 3 4 from ..json_io import JsonIO 5 6 from .red_base import BaseDriver 7 8 __all__ = ["JSON"] 9 10 11 class JSON(BaseDriver): 12 """ 13 Subclass of :py:class:`.red_base.BaseDriver`. 14 15 .. py:attribute:: file_name 16 17 The name of the file in which to store JSON data. 18 19 .. py:attribute:: data_path 20 21 The path in which to store the file indicated by :py:attr:`file_name`. 22 """ 23 def __init__(self, cog_name, *, data_path_override: Path=None, 24 file_name_override: str="settings.json"): 25 super().__init__(cog_name) 26 self.file_name = file_name_override 27 if data_path_override: 28 self.data_path = data_path_override 29 else: 30 self.data_path = Path.cwd() / 'cogs' / '.data' / self.cog_name 31 32 self.data_path.mkdir(parents=True, exist_ok=True) 33 34 self.data_path = self.data_path / self.file_name 35 36 self.jsonIO = JsonIO(self.data_path) 37 38 try: 39 self.data = self.jsonIO._load_json() 40 except FileNotFoundError: 41 self.data = {} 42 self.jsonIO._save_json(self.data) 43 44 async def get(self, *identifiers: Tuple[str]): 45 partial = self.data 46 full_identifiers = (self.unique_cog_identifier, *identifiers) 47 for i in full_identifiers: 48 partial = partial[i] 49 return partial 50 51 async def set(self, *identifiers: str, value=None): 52 partial = self.data 53 full_identifiers = (self.unique_cog_identifier, *identifiers) 54 for i in full_identifiers[:-1]: 55 if i not in partial: 56 partial[i] = {} 57 partial = partial[i] 58 59 partial[full_identifiers[-1]] = value 60 await self.jsonIO._threadsafe_save_json(self.data) 61 62 async def clear(self, *identifiers: str): 63 partial = self.data 64 full_identifiers = (self.unique_cog_identifier, *identifiers) 65 for i in full_identifiers[:-1]: 66 if i not in partial: 67 break 68 partial = partial[i] 69 else: 70 del partial[identifiers[-1]] 71 await self.jsonIO._threadsafe_save_json(self.data) 72 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/redbot/core/drivers/red_json.py b/redbot/core/drivers/red_json.py --- a/redbot/core/drivers/red_json.py +++ b/redbot/core/drivers/red_json.py @@ -62,10 +62,11 @@ async def clear(self, *identifiers: str): partial = self.data full_identifiers = (self.unique_cog_identifier, *identifiers) - for i in full_identifiers[:-1]: - if i not in partial: - break - partial = partial[i] - else: + try: + for i in full_identifiers[:-1]: + partial = partial[i] del partial[identifiers[-1]] - await self.jsonIO._threadsafe_save_json(self.data) + except KeyError: + pass + else: + await self.jsonIO._threadsafe_save_json(self.data)
{"golden_diff": "diff --git a/redbot/core/drivers/red_json.py b/redbot/core/drivers/red_json.py\n--- a/redbot/core/drivers/red_json.py\n+++ b/redbot/core/drivers/red_json.py\n@@ -62,10 +62,11 @@\n async def clear(self, *identifiers: str):\n partial = self.data\n full_identifiers = (self.unique_cog_identifier, *identifiers)\n- for i in full_identifiers[:-1]:\n- if i not in partial:\n- break\n- partial = partial[i]\n- else:\n+ try:\n+ for i in full_identifiers[:-1]:\n+ partial = partial[i]\n del partial[identifiers[-1]]\n- await self.jsonIO._threadsafe_save_json(self.data)\n+ except KeyError:\n+ pass\n+ else:\n+ await self.jsonIO._threadsafe_save_json(self.data)\n", "issue": "[V3/Config] Clearing a registered data group when no data has been set results in a KeyError.\nPlease be sure to read through other issues as well to make sure what you are suggesting/reporting has not already\r\nbeen suggested/reported\r\n\r\n### Type:\r\n\r\n- [ ] Suggestion\r\n- [x] Bug\r\n\r\n### Brief description of the problem\r\nWhen attempting to use methods such as `.clear_all_members` and `.clear_all_globals`, when no data has been set, will result in a KeyError being raised.\r\n### Expected behavior\r\nSilently passes\r\n### Actual behavior\r\nKey Error is raised.\r\n### Steps to reproduce\r\n\r\n1. register some data\r\n2. try to clear all data from that scope\r\n3. Key error is raised\r\n4. You cry\r\n\n", "before_files": [{"content": "from pathlib import Path\nfrom typing import Tuple\n\nfrom ..json_io import JsonIO\n\nfrom .red_base import BaseDriver\n\n__all__ = [\"JSON\"]\n\n\nclass JSON(BaseDriver):\n \"\"\"\n Subclass of :py:class:`.red_base.BaseDriver`.\n\n .. py:attribute:: file_name\n\n The name of the file in which to store JSON data.\n\n .. py:attribute:: data_path\n\n The path in which to store the file indicated by :py:attr:`file_name`.\n \"\"\"\n def __init__(self, cog_name, *, data_path_override: Path=None,\n file_name_override: str=\"settings.json\"):\n super().__init__(cog_name)\n self.file_name = file_name_override\n if data_path_override:\n self.data_path = data_path_override\n else:\n self.data_path = Path.cwd() / 'cogs' / '.data' / self.cog_name\n\n self.data_path.mkdir(parents=True, exist_ok=True)\n\n self.data_path = self.data_path / self.file_name\n\n self.jsonIO = JsonIO(self.data_path)\n\n try:\n self.data = self.jsonIO._load_json()\n except FileNotFoundError:\n self.data = {}\n self.jsonIO._save_json(self.data)\n\n async def get(self, *identifiers: Tuple[str]):\n partial = self.data\n full_identifiers = (self.unique_cog_identifier, *identifiers)\n for i in full_identifiers:\n partial = partial[i]\n return partial\n\n async def set(self, *identifiers: str, value=None):\n partial = self.data\n full_identifiers = (self.unique_cog_identifier, *identifiers)\n for i in full_identifiers[:-1]:\n if i not in partial:\n partial[i] = {}\n partial = partial[i]\n\n partial[full_identifiers[-1]] = value\n await self.jsonIO._threadsafe_save_json(self.data)\n\n async def clear(self, *identifiers: str):\n partial = self.data\n full_identifiers = (self.unique_cog_identifier, *identifiers)\n for i in full_identifiers[:-1]:\n if i not in partial:\n break\n partial = partial[i]\n else:\n del partial[identifiers[-1]]\n await self.jsonIO._threadsafe_save_json(self.data)\n", "path": "redbot/core/drivers/red_json.py"}], "after_files": [{"content": "from pathlib import Path\nfrom typing import Tuple\n\nfrom ..json_io import JsonIO\n\nfrom .red_base import BaseDriver\n\n__all__ = [\"JSON\"]\n\n\nclass 
JSON(BaseDriver):\n \"\"\"\n Subclass of :py:class:`.red_base.BaseDriver`.\n\n .. py:attribute:: file_name\n\n The name of the file in which to store JSON data.\n\n .. py:attribute:: data_path\n\n The path in which to store the file indicated by :py:attr:`file_name`.\n \"\"\"\n def __init__(self, cog_name, *, data_path_override: Path=None,\n file_name_override: str=\"settings.json\"):\n super().__init__(cog_name)\n self.file_name = file_name_override\n if data_path_override:\n self.data_path = data_path_override\n else:\n self.data_path = Path.cwd() / 'cogs' / '.data' / self.cog_name\n\n self.data_path.mkdir(parents=True, exist_ok=True)\n\n self.data_path = self.data_path / self.file_name\n\n self.jsonIO = JsonIO(self.data_path)\n\n try:\n self.data = self.jsonIO._load_json()\n except FileNotFoundError:\n self.data = {}\n self.jsonIO._save_json(self.data)\n\n async def get(self, *identifiers: Tuple[str]):\n partial = self.data\n full_identifiers = (self.unique_cog_identifier, *identifiers)\n for i in full_identifiers:\n partial = partial[i]\n return partial\n\n async def set(self, *identifiers: str, value=None):\n partial = self.data\n full_identifiers = (self.unique_cog_identifier, *identifiers)\n for i in full_identifiers[:-1]:\n if i not in partial:\n partial[i] = {}\n partial = partial[i]\n\n partial[full_identifiers[-1]] = value\n await self.jsonIO._threadsafe_save_json(self.data)\n\n async def clear(self, *identifiers: str):\n partial = self.data\n full_identifiers = (self.unique_cog_identifier, *identifiers)\n try:\n for i in full_identifiers[:-1]:\n partial = partial[i]\n del partial[identifiers[-1]]\n except KeyError:\n pass\n else:\n await self.jsonIO._threadsafe_save_json(self.data)\n", "path": "redbot/core/drivers/red_json.py"}]}
1,062
194
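The Red-DiscordBot patch replaces a for/else guard with the EAFP idiom: walk the nested dict inside `try`, let any missing key abort the whole clear, and only persist when the delete actually happened. The same shape in isolation, with a plain dict standing in for the driver's data and save step:

```python
def clear_nested(data: dict, *identifiers: str) -> bool:
    """Delete data[id0][id1]...[idN]; report whether a save is needed.

    Assumes at least one identifier, like the driver's cog-prefixed path.
    """
    partial = data
    try:
        for key in identifiers[:-1]:
            partial = partial[key]
        del partial[identifiers[-1]]
    except KeyError:
        return False  # nothing was ever set here: silently pass (the bug fix)
    return True       # the real driver awaits _threadsafe_save_json here


store = {"cog": {"members": {"42": {"xp": 10}}}}
assert clear_nested(store, "cog", "members", "42") is True
assert clear_nested(store, "cog", "members", "42") is False  # now a no-op
```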
gh_patches_debug_7462
rasdani/github-patches
git_diff
arviz-devs__arviz-644
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- add api docs for plot_energy https://arviz-devs.github.io/arviz/generated/arviz.plot_energy.html#arviz.plot_energy See Posterior Docs for example https://arviz-devs.github.io/arviz/generated/arviz.plot_posterior.html#arviz.plot_posterior --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `arviz/plots/energyplot.py` Content: ``` 1 """Plot energy transition distribution in HMC inference.""" 2 import numpy as np 3 import matplotlib.pyplot as plt 4 5 from ..data import convert_to_dataset 6 from ..stats import bfmi as e_bfmi 7 from .kdeplot import plot_kde 8 from .plot_utils import _scale_fig_size 9 10 11 def plot_energy( 12 data, 13 kind="kde", 14 bfmi=True, 15 figsize=None, 16 legend=True, 17 fill_alpha=(1, 0.75), 18 fill_color=("C0", "C5"), 19 bw=4.5, 20 textsize=None, 21 fill_kwargs=None, 22 plot_kwargs=None, 23 ax=None, 24 ): 25 """Plot energy transition distribution and marginal energy distribution in HMC algorithms. 26 27 This may help to diagnose poor exploration by gradient-based algorithms like HMC or NUTS. 28 29 Parameters 30 ---------- 31 data : xarray dataset, or object that can be converted (must represent 32 `sample_stats` and have an `energy` variable) 33 kind : str 34 Type of plot to display (kde or histogram) 35 bfmi : bool 36 If True add to the plot the value of the estimated Bayesian fraction of missing information 37 figsize : tuple 38 Figure size. If None it will be defined automatically. 39 legend : bool 40 Flag for plotting legend (defaults to True) 41 fill_alpha : tuple of floats 42 Alpha blending value for the shaded area under the curve, between 0 43 (no shade) and 1 (opaque). Defaults to (1, .75) 44 fill_color : tuple of valid matplotlib color 45 Color for Marginal energy distribution and Energy transition distribution. 46 Defaults to ('C0', 'C5') 47 bw : float 48 Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the 49 smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule 50 of thumb (the default rule used by SciPy). Only works if `kind='kde'` 51 textsize: float 52 Text size scaling factor for labels, titles and lines. If None it will be autoscaled based 53 on figsize. 54 fill_kwargs : dicts, optional 55 Additional keywords passed to `arviz.plot_kde` (to control the shade) 56 plot_kwargs : dicts, optional 57 Additional keywords passed to `arviz.plot_kde` or `plt.hist` (if type='hist') 58 ax : axes 59 Matplotlib axes. 
60 61 Returns 62 ------- 63 ax : matplotlib axes 64 """ 65 energy = convert_to_dataset(data, group="sample_stats").energy.values 66 67 if ax is None: 68 _, ax = plt.subplots(figsize=figsize, constrained_layout=True) 69 70 if fill_kwargs is None: 71 fill_kwargs = {} 72 73 if plot_kwargs is None: 74 plot_kwargs = {} 75 76 figsize, _, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize, 1, 1) 77 78 series = zip( 79 fill_alpha, 80 fill_color, 81 ("Marginal Energy", "Energy transition"), 82 (energy - energy.mean(), np.diff(energy)), 83 ) 84 85 if kind == "kde": 86 for alpha, color, label, value in series: 87 fill_kwargs["alpha"] = alpha 88 fill_kwargs["color"] = color 89 plot_kwargs.setdefault("color", color) 90 plot_kwargs.setdefault("alpha", 0) 91 plot_kwargs.setdefault("linewidth", linewidth) 92 plot_kde( 93 value, 94 bw=bw, 95 label=label, 96 textsize=xt_labelsize, 97 plot_kwargs=plot_kwargs, 98 fill_kwargs=fill_kwargs, 99 ax=ax, 100 ) 101 102 elif kind == "hist": 103 for alpha, color, label, value in series: 104 ax.hist( 105 value.flatten(), 106 bins="auto", 107 density=True, 108 alpha=alpha, 109 label=label, 110 color=color, 111 **plot_kwargs 112 ) 113 114 else: 115 raise ValueError("Plot type {} not recognized.".format(kind)) 116 117 if bfmi: 118 for idx, val in enumerate(e_bfmi(energy)): 119 ax.plot([], label="chain {:>2} BFMI = {:.2f}".format(idx, val), alpha=0) 120 121 ax.set_xticks([]) 122 ax.set_yticks([]) 123 124 if legend: 125 ax.legend() 126 127 return ax 128 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/arviz/plots/energyplot.py b/arviz/plots/energyplot.py --- a/arviz/plots/energyplot.py +++ b/arviz/plots/energyplot.py @@ -61,6 +61,25 @@ Returns ------- ax : matplotlib axes + + Examples + -------- + Plot a default energy plot + + .. plot:: + :context: close-figs + + >>> import arviz as az + >>> data = az.load_arviz_data('centered_eight') + >>> az.plot_energy(data) + + Represent energy plot via histograms + + .. plot:: + :context: close-figs + + >>> az.plot_energy(data, kind='hist') + """ energy = convert_to_dataset(data, group="sample_stats").energy.values
{"golden_diff": "diff --git a/arviz/plots/energyplot.py b/arviz/plots/energyplot.py\n--- a/arviz/plots/energyplot.py\n+++ b/arviz/plots/energyplot.py\n@@ -61,6 +61,25 @@\n Returns\n -------\n ax : matplotlib axes\n+\n+ Examples\n+ --------\n+ Plot a default energy plot\n+\n+ .. plot::\n+ :context: close-figs\n+\n+ >>> import arviz as az\n+ >>> data = az.load_arviz_data('centered_eight')\n+ >>> az.plot_energy(data)\n+\n+ Represent energy plot via histograms\n+\n+ .. plot::\n+ :context: close-figs\n+\n+ >>> az.plot_energy(data, kind='hist')\n+\n \"\"\"\n energy = convert_to_dataset(data, group=\"sample_stats\").energy.values\n", "issue": "add api docs for plot_energy\nhttps://arviz-devs.github.io/arviz/generated/arviz.plot_energy.html#arviz.plot_energy\r\n\r\nSee Posterior Docs for example\r\nhttps://arviz-devs.github.io/arviz/generated/arviz.plot_posterior.html#arviz.plot_posterior\n", "before_files": [{"content": "\"\"\"Plot energy transition distribution in HMC inference.\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom ..data import convert_to_dataset\nfrom ..stats import bfmi as e_bfmi\nfrom .kdeplot import plot_kde\nfrom .plot_utils import _scale_fig_size\n\n\ndef plot_energy(\n data,\n kind=\"kde\",\n bfmi=True,\n figsize=None,\n legend=True,\n fill_alpha=(1, 0.75),\n fill_color=(\"C0\", \"C5\"),\n bw=4.5,\n textsize=None,\n fill_kwargs=None,\n plot_kwargs=None,\n ax=None,\n):\n \"\"\"Plot energy transition distribution and marginal energy distribution in HMC algorithms.\n\n This may help to diagnose poor exploration by gradient-based algorithms like HMC or NUTS.\n\n Parameters\n ----------\n data : xarray dataset, or object that can be converted (must represent\n `sample_stats` and have an `energy` variable)\n kind : str\n Type of plot to display (kde or histogram)\n bfmi : bool\n If True add to the plot the value of the estimated Bayesian fraction of missing information\n figsize : tuple\n Figure size. If None it will be defined automatically.\n legend : bool\n Flag for plotting legend (defaults to True)\n fill_alpha : tuple of floats\n Alpha blending value for the shaded area under the curve, between 0\n (no shade) and 1 (opaque). Defaults to (1, .75)\n fill_color : tuple of valid matplotlib color\n Color for Marginal energy distribution and Energy transition distribution.\n Defaults to ('C0', 'C5')\n bw : float\n Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the\n smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule\n of thumb (the default rule used by SciPy). Only works if `kind='kde'`\n textsize: float\n Text size scaling factor for labels, titles and lines. 
If None it will be autoscaled based\n on figsize.\n fill_kwargs : dicts, optional\n Additional keywords passed to `arviz.plot_kde` (to control the shade)\n plot_kwargs : dicts, optional\n Additional keywords passed to `arviz.plot_kde` or `plt.hist` (if type='hist')\n ax : axes\n Matplotlib axes.\n\n Returns\n -------\n ax : matplotlib axes\n \"\"\"\n energy = convert_to_dataset(data, group=\"sample_stats\").energy.values\n\n if ax is None:\n _, ax = plt.subplots(figsize=figsize, constrained_layout=True)\n\n if fill_kwargs is None:\n fill_kwargs = {}\n\n if plot_kwargs is None:\n plot_kwargs = {}\n\n figsize, _, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize, 1, 1)\n\n series = zip(\n fill_alpha,\n fill_color,\n (\"Marginal Energy\", \"Energy transition\"),\n (energy - energy.mean(), np.diff(energy)),\n )\n\n if kind == \"kde\":\n for alpha, color, label, value in series:\n fill_kwargs[\"alpha\"] = alpha\n fill_kwargs[\"color\"] = color\n plot_kwargs.setdefault(\"color\", color)\n plot_kwargs.setdefault(\"alpha\", 0)\n plot_kwargs.setdefault(\"linewidth\", linewidth)\n plot_kde(\n value,\n bw=bw,\n label=label,\n textsize=xt_labelsize,\n plot_kwargs=plot_kwargs,\n fill_kwargs=fill_kwargs,\n ax=ax,\n )\n\n elif kind == \"hist\":\n for alpha, color, label, value in series:\n ax.hist(\n value.flatten(),\n bins=\"auto\",\n density=True,\n alpha=alpha,\n label=label,\n color=color,\n **plot_kwargs\n )\n\n else:\n raise ValueError(\"Plot type {} not recognized.\".format(kind))\n\n if bfmi:\n for idx, val in enumerate(e_bfmi(energy)):\n ax.plot([], label=\"chain {:>2} BFMI = {:.2f}\".format(idx, val), alpha=0)\n\n ax.set_xticks([])\n ax.set_yticks([])\n\n if legend:\n ax.legend()\n\n return ax\n", "path": "arviz/plots/energyplot.py"}], "after_files": [{"content": "\"\"\"Plot energy transition distribution in HMC inference.\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom ..data import convert_to_dataset\nfrom ..stats import bfmi as e_bfmi\nfrom .kdeplot import plot_kde\nfrom .plot_utils import _scale_fig_size\n\n\ndef plot_energy(\n data,\n kind=\"kde\",\n bfmi=True,\n figsize=None,\n legend=True,\n fill_alpha=(1, 0.75),\n fill_color=(\"C0\", \"C5\"),\n bw=4.5,\n textsize=None,\n fill_kwargs=None,\n plot_kwargs=None,\n ax=None,\n):\n \"\"\"Plot energy transition distribution and marginal energy distribution in HMC algorithms.\n\n This may help to diagnose poor exploration by gradient-based algorithms like HMC or NUTS.\n\n Parameters\n ----------\n data : xarray dataset, or object that can be converted (must represent\n `sample_stats` and have an `energy` variable)\n kind : str\n Type of plot to display (kde or histogram)\n bfmi : bool\n If True add to the plot the value of the estimated Bayesian fraction of missing information\n figsize : tuple\n Figure size. If None it will be defined automatically.\n legend : bool\n Flag for plotting legend (defaults to True)\n fill_alpha : tuple of floats\n Alpha blending value for the shaded area under the curve, between 0\n (no shade) and 1 (opaque). Defaults to (1, .75)\n fill_color : tuple of valid matplotlib color\n Color for Marginal energy distribution and Energy transition distribution.\n Defaults to ('C0', 'C5')\n bw : float\n Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the\n smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule\n of thumb (the default rule used by SciPy). 
Only works if `kind='kde'`\n textsize: float\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on figsize.\n fill_kwargs : dicts, optional\n Additional keywords passed to `arviz.plot_kde` (to control the shade)\n plot_kwargs : dicts, optional\n Additional keywords passed to `arviz.plot_kde` or `plt.hist` (if type='hist')\n ax : axes\n Matplotlib axes.\n\n Returns\n -------\n ax : matplotlib axes\n\n Examples\n --------\n Plot a default energy plot\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> data = az.load_arviz_data('centered_eight')\n >>> az.plot_energy(data)\n\n Represent energy plot via histograms\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_energy(data, kind='hist')\n\n \"\"\"\n energy = convert_to_dataset(data, group=\"sample_stats\").energy.values\n\n if ax is None:\n _, ax = plt.subplots(figsize=figsize, constrained_layout=True)\n\n if fill_kwargs is None:\n fill_kwargs = {}\n\n if plot_kwargs is None:\n plot_kwargs = {}\n\n figsize, _, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize, 1, 1)\n\n series = zip(\n fill_alpha,\n fill_color,\n (\"Marginal Energy\", \"Energy transition\"),\n (energy - energy.mean(), np.diff(energy)),\n )\n\n if kind == \"kde\":\n for alpha, color, label, value in series:\n fill_kwargs[\"alpha\"] = alpha\n fill_kwargs[\"color\"] = color\n plot_kwargs.setdefault(\"color\", color)\n plot_kwargs.setdefault(\"alpha\", 0)\n plot_kwargs.setdefault(\"linewidth\", linewidth)\n plot_kde(\n value,\n bw=bw,\n label=label,\n textsize=xt_labelsize,\n plot_kwargs=plot_kwargs,\n fill_kwargs=fill_kwargs,\n ax=ax,\n )\n\n elif kind == \"hist\":\n for alpha, color, label, value in series:\n ax.hist(\n value.flatten(),\n bins=\"auto\",\n density=True,\n alpha=alpha,\n label=label,\n color=color,\n **plot_kwargs\n )\n\n else:\n raise ValueError(\"Plot type {} not recognized.\".format(kind))\n\n if bfmi:\n for idx, val in enumerate(e_bfmi(energy)):\n ax.plot([], label=\"chain {:>2} BFMI = {:.2f}\".format(idx, val), alpha=0)\n\n ax.set_xticks([])\n ax.set_yticks([])\n\n if legend:\n ax.legend()\n\n return ax\n", "path": "arviz/plots/energyplot.py"}]}
1536
191
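The arviz record above ends with a golden diff whose main change is adding runnable usage examples to the `plot_energy` docstring. For quick reference, the call pattern those added examples demonstrate is reproduced below as a standalone script; it assumes arviz is installed and its bundled `centered_eight` sample dataset is available.

```python
import arviz as az

# Load the sample trace used by the docstring examples added in the diff.
data = az.load_arviz_data("centered_eight")

# Default view: KDE of the marginal energy and energy-transition distributions.
az.plot_energy(data)

# Histogram variant, matching the second added example.
az.plot_energy(data, kind="hist")
```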
gh_patches_debug_14517
rasdani/github-patches
git_diff
Qiskit__qiskit-862
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make interactive visualizations default for Jupyter <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. --> ### What is the expected enhancement? The interactive visualizations contributed in PR #765 are much better than the static ones, and should be default when the environment is a Jupyter notebook. This is related to Epic #707 for providing better jupyter tools. Several issues must be solved first: - These visualizations need internet connection. Can we eliminate that? - The interface of the static and interactive visualizations are all the same, except for `plot_histogram` vs `iplot_histogram`. These should be made similar for consistency. But the interactive visualization is able to plot multiple histograms in one plot. - How "heavy" is it to run these interactive visualizations? I hear my laptop fan. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `qiskit/tools/visualization/__init__.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # Copyright 2018, IBM. 4 # 5 # This source code is licensed under the Apache License, Version 2.0 found in 6 # the LICENSE.txt file in the root directory of this source tree. 7 8 """Main QISKit visualization methods.""" 9 10 from ._circuit_visualization import circuit_drawer, plot_circuit, generate_latex_source,\ 11 latex_circuit_drawer, matplotlib_circuit_drawer, qx_color_scheme 12 from ._state_visualization import plot_state 13 from ._counts_visualization import plot_histogram 14 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/qiskit/tools/visualization/__init__.py b/qiskit/tools/visualization/__init__.py --- a/qiskit/tools/visualization/__init__.py +++ b/qiskit/tools/visualization/__init__.py @@ -7,7 +7,18 @@ """Main QISKit visualization methods.""" +import sys + from ._circuit_visualization import circuit_drawer, plot_circuit, generate_latex_source,\ latex_circuit_drawer, matplotlib_circuit_drawer, qx_color_scheme -from ._state_visualization import plot_state from ._counts_visualization import plot_histogram + +if ('ipykernel' in sys.modules) and ('spyder' not in sys.modules): + import requests + if requests.get( + 'https://qvisualization.mybluemix.net/').status_code == 200: + from .interactive._iplot_state import iplot_state as plot_state + else: + from ._state_visualization import plot_state +else: + from ._state_visualization import plot_state
{"golden_diff": "diff --git a/qiskit/tools/visualization/__init__.py b/qiskit/tools/visualization/__init__.py\n--- a/qiskit/tools/visualization/__init__.py\n+++ b/qiskit/tools/visualization/__init__.py\n@@ -7,7 +7,18 @@\n \n \"\"\"Main QISKit visualization methods.\"\"\"\n \n+import sys\n+\n from ._circuit_visualization import circuit_drawer, plot_circuit, generate_latex_source,\\\n latex_circuit_drawer, matplotlib_circuit_drawer, qx_color_scheme\n-from ._state_visualization import plot_state\n from ._counts_visualization import plot_histogram\n+\n+if ('ipykernel' in sys.modules) and ('spyder' not in sys.modules):\n+ import requests\n+ if requests.get(\n+ 'https://qvisualization.mybluemix.net/').status_code == 200:\n+ from .interactive._iplot_state import iplot_state as plot_state\n+ else:\n+ from ._state_visualization import plot_state\n+else:\n+ from ._state_visualization import plot_state\n", "issue": "Make interactive visualizations default for Jupyter\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected enhancement?\r\nThe interactive visualizations contributed in PR #765 are much better than the static ones, and should be default when the environment is a Jupyter notebook. This is related to Epic #707 for providing better jupyter tools.\r\n\r\nSeveral issues must be solved first:\r\n- These visualizations need internet connection. Can we eliminate that?\r\n- The interface of the static and interactive visualizations are all the same, except for `plot_histogram` vs `iplot_histogram`. These should be made similar for consistency. But the interactive visualization is able to plot multiple histograms in one plot.\r\n- How \"heavy\" is it to run these interactive visualizations? I hear my laptop fan.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright 2018, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\"\"\"Main QISKit visualization methods.\"\"\"\n\nfrom ._circuit_visualization import circuit_drawer, plot_circuit, generate_latex_source,\\\n latex_circuit_drawer, matplotlib_circuit_drawer, qx_color_scheme\nfrom ._state_visualization import plot_state\nfrom ._counts_visualization import plot_histogram\n", "path": "qiskit/tools/visualization/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright 2018, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\"\"\"Main QISKit visualization methods.\"\"\"\n\nimport sys\n\nfrom ._circuit_visualization import circuit_drawer, plot_circuit, generate_latex_source,\\\n latex_circuit_drawer, matplotlib_circuit_drawer, qx_color_scheme\nfrom ._counts_visualization import plot_histogram\n\nif ('ipykernel' in sys.modules) and ('spyder' not in sys.modules):\n import requests\n if requests.get(\n 'https://qvisualization.mybluemix.net/').status_code == 200:\n from .interactive._iplot_state import iplot_state as plot_state\n else:\n from ._state_visualization import plot_state\nelse:\n from ._state_visualization import plot_state\n", "path": "qiskit/tools/visualization/__init__.py"}]}
593
230
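The Qiskit golden diff above gates the interactive `plot_state` import on two runtime checks: whether an IPython kernel is active (excluding Spyder, which also loads `ipykernel`) and whether the visualization service responds with HTTP 200. A minimal sketch of the first check, pulled out of the diff for readability; the function name is illustrative and not part of the original patch:

```python
import sys

def in_jupyter_kernel() -> bool:
    # Same heuristic as the patch: ipykernel is present inside a Jupyter
    # kernel, but Spyder also loads it, so Spyder is excluded explicitly.
    return "ipykernel" in sys.modules and "spyder" not in sys.modules
```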
gh_patches_debug_58539
rasdani/github-patches
git_diff
ansible__molecule-4038
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- created: true is ignored in state.yml # Issue Type - Bug report # Molecule and Ansible details ``` ansible --version && molecule --version ansible [core 2.15.3] config file = None configured module search path = ['/home/manu/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/manu/.local/share/virtualenvs/molecule-test-IsY3eZIi/lib/python3.11/site-packages/ansible ansible collection location = /home/manu/.ansible/collections:/usr/share/ansible/collections executable location = /home/manu/.local/share/virtualenvs/molecule-test-IsY3eZIi/bin/ansible python version = 3.11.5 (main, Aug 28 2023, 20:02:58) [GCC 13.2.1 20230801] (/home/manu/.local/share/virtualenvs/molecule-test-IsY3eZIi/bin/python) jinja version = 3.1.2 libyaml = True molecule 6.0.2 using python 3.11 ansible:2.15.3 default:6.0.2 from molecule molecule-qemu:0.5.3 from molecule_qemu ``` Molecule installation method: source Ansible installation method: pip # Desired Behavior In molecule v5, the `create` step was automatically skipped when `created: true` in `<XDG_CACHE_HOME>/molecule//<scenario>/state.yml` with the message `WARNING Skipping, instances already created.`. This is the desired behavior. Here an example with a simple hello_world role after a `molecule create` execution. The molecule-qemu plugin is used here. ``` ❯ molecule converge INFO default scenario test matrix: dependency, create, prepare, converge INFO Performing prerun with role_name_check=0... INFO Set ANSIBLE_LIBRARY=/home/manu/.cache/ansible-compat/35072c/modules:/home/manu/.ansible/plugins/modules:/usr/share/ansible/plugins/modules INFO Set ANSIBLE_COLLECTIONS_PATH=/home/manu/.cache/ansible-compat/35072c/collections:/home/manu/.ansible/collections:/usr/share/ansible/collections INFO Set ANSIBLE_ROLES_PATH=/home/manu/.cache/ansible-compat/35072c/roles:/home/manu/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles INFO Running default > dependency WARNING Skipping, missing the requirements file. WARNING Skipping, missing the requirements file. INFO Running default > create WARNING Skipping, instances already created. INFO Running default > prepare WARNING Skipping, prepare playbook not configured. INFO Running default > converge PLAY [all] ********************************************************************* TASK [hello_world : Hello world] *********************************************** ok: [test-hello-world] => { "msg": "Hello, world!" } PLAY RECAP ********************************************************************* test-hello-world : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 ``` # Actual Behaviour Since molecule v6, the `WARNING Skipping, instances already created.` message is no longer displayed and the create step is executed each time the `molecule converge` is called. Here an example with the same role, with the same conditions except that molecule version is 6.0.2. ``` INFO default scenario test matrix: dependency, create, prepare, converge INFO Performing prerun with role_name_check=0... INFO Running default > dependency WARNING Skipping, missing the requirements file. WARNING Skipping, missing the requirements file. 
INFO Running default > create PLAY [Create] ****************************************************************** TASK [Gather only necessary facts] ********************************************* ok: [localhost] TASK [Register VMs data] ******************************************************* ok: [localhost] => (item=test-hello-world) TASK [Prepare VMs data] ******************************************************** ok: [localhost] ... TASK [Dump VMs config] ********************************************************* ok: [localhost] PLAY RECAP ********************************************************************* localhost : ok=31 changed=0 unreachable=0 failed=0 skipped=5 rescued=0 ignored=0 INFO Running default > prepare WARNING Skipping, prepare playbook not configured. INFO Running default > converge PLAY [all] ********************************************************************* TASK [hello_world : Hello world] *********************************************** ok: [test-hello-world] => { "msg": "Hello, world!" } PLAY RECAP ********************************************************************* test-hello-world : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/molecule/command/create.py` Content: ``` 1 # Copyright (c) 2015-2018 Cisco Systems, Inc. 2 # 3 # Permission is hereby granted, free of charge, to any person obtaining a copy 4 # of this software and associated documentation files (the "Software"), to 5 # deal in the Software without restriction, including without limitation the 6 # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 7 # sell copies of the Software, and to permit persons to whom the Software is 8 # furnished to do so, subject to the following conditions: 9 # 10 # The above copyright notice and this permission notice shall be included in 11 # all copies or substantial portions of the Software. 12 # 13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 # DEALINGS IN THE SOFTWARE. 20 """Create Command Module.""" 21 22 import logging 23 24 import click 25 26 from molecule.api import drivers 27 from molecule.command import base 28 from molecule.config import DEFAULT_DRIVER 29 30 LOG = logging.getLogger(__name__) 31 32 33 class Create(base.Base): 34 """Create Command Class.""" 35 36 def execute(self, action_args=None): 37 """Execute the actions necessary to perform a `molecule create` and \ 38 returns None. 39 40 :return: None 41 """ 42 self._config.state.change_state("driver", self._config.driver.name) 43 44 self._config.provisioner.create() 45 46 self._config.state.change_state("created", True) 47 48 49 @base.click_command_ex() 50 @click.pass_context 51 @click.option( 52 "--scenario-name", 53 "-s", 54 default=base.MOLECULE_DEFAULT_SCENARIO_NAME, 55 help=f"Name of the scenario to target. ({base.MOLECULE_DEFAULT_SCENARIO_NAME})", 56 ) 57 @click.option( 58 "--driver-name", 59 "-d", 60 type=click.Choice([str(s) for s in drivers()]), 61 help=f"Name of driver to use. 
({DEFAULT_DRIVER})", 62 ) 63 def create(ctx, scenario_name, driver_name): # pragma: no cover 64 """Use the provisioner to start the instances.""" 65 args = ctx.obj.get("args") 66 subcommand = base._get_subcommand(__name__) 67 command_args = {"subcommand": subcommand, "driver_name": driver_name} 68 69 base.execute_cmdline_scenarios(scenario_name, args, command_args) 70 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/molecule/command/create.py b/src/molecule/command/create.py --- a/src/molecule/command/create.py +++ b/src/molecule/command/create.py @@ -41,6 +41,11 @@ """ self._config.state.change_state("driver", self._config.driver.name) + if self._config.state.created: + msg = "Skipping, instances already created." + LOG.warning(msg) + return + self._config.provisioner.create() self._config.state.change_state("created", True)
{"golden_diff": "diff --git a/src/molecule/command/create.py b/src/molecule/command/create.py\n--- a/src/molecule/command/create.py\n+++ b/src/molecule/command/create.py\n@@ -41,6 +41,11 @@\n \"\"\"\n self._config.state.change_state(\"driver\", self._config.driver.name)\n \n+ if self._config.state.created:\n+ msg = \"Skipping, instances already created.\"\n+ LOG.warning(msg)\n+ return\n+\n self._config.provisioner.create()\n \n self._config.state.change_state(\"created\", True)\n", "issue": "created: true is ignored in state.yml\n# Issue Type\r\n\r\n- Bug report\r\n\r\n# Molecule and Ansible details\r\n\r\n```\r\nansible --version && molecule --version\r\nansible [core 2.15.3]\r\n config file = None\r\n configured module search path = ['/home/manu/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /home/manu/.local/share/virtualenvs/molecule-test-IsY3eZIi/lib/python3.11/site-packages/ansible\r\n ansible collection location = /home/manu/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /home/manu/.local/share/virtualenvs/molecule-test-IsY3eZIi/bin/ansible\r\n python version = 3.11.5 (main, Aug 28 2023, 20:02:58) [GCC 13.2.1 20230801] (/home/manu/.local/share/virtualenvs/molecule-test-IsY3eZIi/bin/python)\r\n jinja version = 3.1.2\r\n libyaml = True\r\nmolecule 6.0.2 using python 3.11\r\n ansible:2.15.3\r\n default:6.0.2 from molecule\r\n molecule-qemu:0.5.3 from molecule_qemu\r\n```\r\n\r\nMolecule installation method: source\r\nAnsible installation method: pip\r\n\r\n# Desired Behavior\r\n\r\nIn molecule v5, the `create` step was automatically skipped when `created: true` in `<XDG_CACHE_HOME>/molecule//<scenario>/state.yml` with the message `WARNING Skipping, instances already created.`. This is the desired behavior.\r\n\r\nHere an example with a simple hello_world role after a `molecule create` execution. 
The molecule-qemu plugin is used here.\r\n```\r\n\u276f molecule converge\r\nINFO default scenario test matrix: dependency, create, prepare, converge\r\nINFO Performing prerun with role_name_check=0...\r\nINFO Set ANSIBLE_LIBRARY=/home/manu/.cache/ansible-compat/35072c/modules:/home/manu/.ansible/plugins/modules:/usr/share/ansible/plugins/modules\r\nINFO Set ANSIBLE_COLLECTIONS_PATH=/home/manu/.cache/ansible-compat/35072c/collections:/home/manu/.ansible/collections:/usr/share/ansible/collections\r\nINFO Set ANSIBLE_ROLES_PATH=/home/manu/.cache/ansible-compat/35072c/roles:/home/manu/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles\r\nINFO Running default > dependency\r\nWARNING Skipping, missing the requirements file.\r\nWARNING Skipping, missing the requirements file.\r\nINFO Running default > create\r\nWARNING Skipping, instances already created.\r\nINFO Running default > prepare\r\nWARNING Skipping, prepare playbook not configured.\r\nINFO Running default > converge\r\n\r\nPLAY [all] *********************************************************************\r\n\r\nTASK [hello_world : Hello world] ***********************************************\r\nok: [test-hello-world] => {\r\n \"msg\": \"Hello, world!\"\r\n}\r\n\r\nPLAY RECAP *********************************************************************\r\ntest-hello-world : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0\r\n```\r\n\r\n# Actual Behaviour\r\n\r\nSince molecule v6, the `WARNING Skipping, instances already created.` message is no longer displayed and the create step is executed each time the `molecule converge` is called.\r\n\r\nHere an example with the same role, with the same conditions except that molecule version is 6.0.2.\r\n```\r\nINFO default scenario test matrix: dependency, create, prepare, converge\r\nINFO Performing prerun with role_name_check=0...\r\nINFO Running default > dependency\r\nWARNING Skipping, missing the requirements file.\r\nWARNING Skipping, missing the requirements file.\r\nINFO Running default > create\r\n\r\nPLAY [Create] ******************************************************************\r\n\r\nTASK [Gather only necessary facts] *********************************************\r\nok: [localhost]\r\n\r\nTASK [Register VMs data] *******************************************************\r\nok: [localhost] => (item=test-hello-world)\r\n\r\nTASK [Prepare VMs data] ********************************************************\r\nok: [localhost]\r\n...\r\nTASK [Dump VMs config] *********************************************************\r\nok: [localhost]\r\n\r\nPLAY RECAP *********************************************************************\r\nlocalhost : ok=31 changed=0 unreachable=0 failed=0 skipped=5 rescued=0 ignored=0\r\n\r\nINFO Running default > prepare\r\nWARNING Skipping, prepare playbook not configured.\r\nINFO Running default > converge\r\n\r\nPLAY [all] *********************************************************************\r\n\r\nTASK [hello_world : Hello world] ***********************************************\r\nok: [test-hello-world] => {\r\n \"msg\": \"Hello, world!\"\r\n}\r\n\r\nPLAY RECAP *********************************************************************\r\ntest-hello-world : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0\r\n```\n", "before_files": [{"content": "# Copyright (c) 2015-2018 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the 
\"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\"\"\"Create Command Module.\"\"\"\n\nimport logging\n\nimport click\n\nfrom molecule.api import drivers\nfrom molecule.command import base\nfrom molecule.config import DEFAULT_DRIVER\n\nLOG = logging.getLogger(__name__)\n\n\nclass Create(base.Base):\n \"\"\"Create Command Class.\"\"\"\n\n def execute(self, action_args=None):\n \"\"\"Execute the actions necessary to perform a `molecule create` and \\\n returns None.\n\n :return: None\n \"\"\"\n self._config.state.change_state(\"driver\", self._config.driver.name)\n\n self._config.provisioner.create()\n\n self._config.state.change_state(\"created\", True)\n\n\n@base.click_command_ex()\n@click.pass_context\n@click.option(\n \"--scenario-name\",\n \"-s\",\n default=base.MOLECULE_DEFAULT_SCENARIO_NAME,\n help=f\"Name of the scenario to target. ({base.MOLECULE_DEFAULT_SCENARIO_NAME})\",\n)\n@click.option(\n \"--driver-name\",\n \"-d\",\n type=click.Choice([str(s) for s in drivers()]),\n help=f\"Name of driver to use. ({DEFAULT_DRIVER})\",\n)\ndef create(ctx, scenario_name, driver_name): # pragma: no cover\n \"\"\"Use the provisioner to start the instances.\"\"\"\n args = ctx.obj.get(\"args\")\n subcommand = base._get_subcommand(__name__)\n command_args = {\"subcommand\": subcommand, \"driver_name\": driver_name}\n\n base.execute_cmdline_scenarios(scenario_name, args, command_args)\n", "path": "src/molecule/command/create.py"}], "after_files": [{"content": "# Copyright (c) 2015-2018 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\"\"\"Create Command Module.\"\"\"\n\nimport logging\n\nimport click\n\nfrom molecule.api import drivers\nfrom molecule.command import base\nfrom molecule.config import DEFAULT_DRIVER\n\nLOG = logging.getLogger(__name__)\n\n\nclass Create(base.Base):\n \"\"\"Create Command Class.\"\"\"\n\n def execute(self, action_args=None):\n \"\"\"Execute the actions necessary to perform a `molecule create` and \\\n returns None.\n\n :return: None\n \"\"\"\n self._config.state.change_state(\"driver\", self._config.driver.name)\n\n if self._config.state.created:\n msg = \"Skipping, instances already created.\"\n LOG.warning(msg)\n return\n\n self._config.provisioner.create()\n\n self._config.state.change_state(\"created\", True)\n\n\n@base.click_command_ex()\n@click.pass_context\n@click.option(\n \"--scenario-name\",\n \"-s\",\n default=base.MOLECULE_DEFAULT_SCENARIO_NAME,\n help=f\"Name of the scenario to target. ({base.MOLECULE_DEFAULT_SCENARIO_NAME})\",\n)\n@click.option(\n \"--driver-name\",\n \"-d\",\n type=click.Choice([str(s) for s in drivers()]),\n help=f\"Name of driver to use. ({DEFAULT_DRIVER})\",\n)\ndef create(ctx, scenario_name, driver_name): # pragma: no cover\n \"\"\"Use the provisioner to start the instances.\"\"\"\n args = ctx.obj.get(\"args\")\n subcommand = base._get_subcommand(__name__)\n command_args = {\"subcommand\": subcommand, \"driver_name\": driver_name}\n\n base.execute_cmdline_scenarios(scenario_name, args, command_args)\n", "path": "src/molecule/command/create.py"}]}
2034
121
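The molecule fix in this record restores an early return in `Create.execute` when the scenario state file already reports `created: true`. A minimal test sketch for that behavior, assuming pytest and `unittest.mock`; the attribute layout mirrors the patched class, and everything outside it is mocked:

```python
from unittest.mock import MagicMock

from molecule.command.create import Create

def test_create_skips_when_instances_exist():
    # Bypass normal construction; the patched execute() only touches
    # self._config, so a bare instance with a mocked config suffices.
    cmd = Create.__new__(Create)
    cmd._config = MagicMock()
    cmd._config.state.created = True  # simulates state.yml: created: true

    cmd.execute()

    # With the patch applied, the provisioner must not be invoked again.
    cmd._config.provisioner.create.assert_not_called()
```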
gh_patches_debug_3440
rasdani/github-patches
git_diff
spack__spack-21932
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- shell test kcov suggestion does not work ### Steps to reproduce the issue ```console $ ./share/spack/qa/run-shell-tests ERROR: kcov is required to run this script. To install with Spack, run: $ spack install kcov Then add the bin directory to your PATH. $ spack load kcov $ ./share/spack/qa/run-shell-tests ERROR: kcov is required to run this script. To install with Spack, run: $ spack install kcov Then add the bin directory to your PATH. ``` Seems like there is no kcov binary: ``` $ kcov Command 'kcov' not found, but can be installed with: sudo apt install kcov ``` * **Spack:** 0.16.0-1501-e8eb76c198 * **Python:** 3.8.7 * **Platform:** linux-ubuntu20.04-zen2 * **Concretizer:** clingo ### Additional information - [x] I have run `spack debug report` and reported the version of Spack/Python/Platform - [x] I have searched the issues of this repo and believe this is not a duplicate - [x] I have run the failing commands in debug mode and reported the output --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `var/spack/repos/builtin/packages/kcov/package.py` Content: ``` 1 # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other 2 # Spack Project Developers. See the top-level COPYRIGHT file for details. 3 # 4 # SPDX-License-Identifier: (Apache-2.0 OR MIT) 5 6 from spack import * 7 8 9 class Kcov(CMakePackage): 10 """Code coverage tool for compiled programs, Python and Bash which uses 11 debugging information to collect and report data without special 12 compilation options""" 13 14 homepage = "http://simonkagstrom.github.io/kcov/index.html" 15 url = "https://github.com/SimonKagstrom/kcov/archive/38.tar.gz" 16 17 version('38', sha256='b37af60d81a9b1e3b140f9473bdcb7975af12040feb24cc666f9bb2bb0be68b4') 18 19 depends_on('cmake@2.8.4:', type='build') 20 depends_on('zlib') 21 depends_on('curl') 22 23 def cmake_args(self): 24 # Necessary at least on macOS, fixes linking error to LLDB 25 # https://github.com/Homebrew/homebrew-core/blob/master/Formula/kcov.rb 26 return ['-DSPECIFY_RPATH=ON'] 27 28 @run_after('install') 29 @on_package_attributes(run_tests=True) 30 def test_install(self): 31 # The help message exits with an exit code of 1 32 kcov = Executable(self.prefix.bin.kcov) 33 kcov('-h', ignore_errors=1) 34 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/var/spack/repos/builtin/packages/kcov/package.py b/var/spack/repos/builtin/packages/kcov/package.py --- a/var/spack/repos/builtin/packages/kcov/package.py +++ b/var/spack/repos/builtin/packages/kcov/package.py @@ -19,6 +19,8 @@ depends_on('cmake@2.8.4:', type='build') depends_on('zlib') depends_on('curl') + depends_on('elfutils') + depends_on('binutils +libiberty', type='link') def cmake_args(self): # Necessary at least on macOS, fixes linking error to LLDB
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/kcov/package.py b/var/spack/repos/builtin/packages/kcov/package.py\n--- a/var/spack/repos/builtin/packages/kcov/package.py\n+++ b/var/spack/repos/builtin/packages/kcov/package.py\n@@ -19,6 +19,8 @@\n depends_on('cmake@2.8.4:', type='build')\n depends_on('zlib')\n depends_on('curl')\n+ depends_on('elfutils')\n+ depends_on('binutils +libiberty', type='link')\n \n def cmake_args(self):\n # Necessary at least on macOS, fixes linking error to LLDB\n", "issue": "shell test kcov suggestion does not work\n### Steps to reproduce the issue\r\n\r\n```console\r\n$ ./share/spack/qa/run-shell-tests \r\nERROR: kcov is required to run this script.\r\n\r\nTo install with Spack, run:\r\n $ spack install kcov\r\nThen add the bin directory to your PATH.\r\n$ spack load kcov\r\n$ ./share/spack/qa/run-shell-tests \r\nERROR: kcov is required to run this script.\r\n\r\nTo install with Spack, run:\r\n $ spack install kcov\r\nThen add the bin directory to your PATH.\r\n```\r\n\r\nSeems like there is no kcov binary:\r\n\r\n```\r\n$ kcov\r\n\r\nCommand 'kcov' not found, but can be installed with:\r\n\r\nsudo apt install kcov\r\n```\r\n\r\n* **Spack:** 0.16.0-1501-e8eb76c198\r\n* **Python:** 3.8.7\r\n* **Platform:** linux-ubuntu20.04-zen2\r\n* **Concretizer:** clingo\r\n\r\n### Additional information\r\n\r\n- [x] I have run `spack debug report` and reported the version of Spack/Python/Platform\r\n- [x] I have searched the issues of this repo and believe this is not a duplicate\r\n- [x] I have run the failing commands in debug mode and reported the output\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass Kcov(CMakePackage):\n \"\"\"Code coverage tool for compiled programs, Python and Bash which uses\n debugging information to collect and report data without special\n compilation options\"\"\"\n\n homepage = \"http://simonkagstrom.github.io/kcov/index.html\"\n url = \"https://github.com/SimonKagstrom/kcov/archive/38.tar.gz\"\n\n version('38', sha256='b37af60d81a9b1e3b140f9473bdcb7975af12040feb24cc666f9bb2bb0be68b4')\n\n depends_on('cmake@2.8.4:', type='build')\n depends_on('zlib')\n depends_on('curl')\n\n def cmake_args(self):\n # Necessary at least on macOS, fixes linking error to LLDB\n # https://github.com/Homebrew/homebrew-core/blob/master/Formula/kcov.rb\n return ['-DSPECIFY_RPATH=ON']\n\n @run_after('install')\n @on_package_attributes(run_tests=True)\n def test_install(self):\n # The help message exits with an exit code of 1\n kcov = Executable(self.prefix.bin.kcov)\n kcov('-h', ignore_errors=1)\n", "path": "var/spack/repos/builtin/packages/kcov/package.py"}], "after_files": [{"content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass Kcov(CMakePackage):\n \"\"\"Code coverage tool for compiled programs, Python and Bash which uses\n debugging information to collect and report data without special\n compilation options\"\"\"\n\n homepage = \"http://simonkagstrom.github.io/kcov/index.html\"\n url = \"https://github.com/SimonKagstrom/kcov/archive/38.tar.gz\"\n\n version('38', sha256='b37af60d81a9b1e3b140f9473bdcb7975af12040feb24cc666f9bb2bb0be68b4')\n\n depends_on('cmake@2.8.4:', type='build')\n depends_on('zlib')\n depends_on('curl')\n depends_on('elfutils')\n depends_on('binutils +libiberty', type='link')\n\n def cmake_args(self):\n # Necessary at least on macOS, fixes linking error to LLDB\n # https://github.com/Homebrew/homebrew-core/blob/master/Formula/kcov.rb\n return ['-DSPECIFY_RPATH=ON']\n\n @run_after('install')\n @on_package_attributes(run_tests=True)\n def test_install(self):\n # The help message exits with an exit code of 1\n kcov = Executable(self.prefix.bin.kcov)\n kcov('-h', ignore_errors=1)\n", "path": "var/spack/repos/builtin/packages/kcov/package.py"}]}
965
144
gh_patches_debug_23351
rasdani/github-patches
git_diff
pwndbg__pwndbg-671
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- subprocess.CalledProcessError: Command '['/usr/local/bin/checksec', '--file', '/tmp/xxx']' returned non-zero exit status 2. ``` pwndbg> checksec Traceback (most recent call last): File "/root/pwndbg/pwndbg/commands/__init__.py", line 109, in __call__ return self.function(*args, **kwargs) File "/root/pwndbg/pwndbg/commands/__init__.py", line 189, in _OnlyWithFile return function(*a, **kw) File "/root/pwndbg/pwndbg/commands/checksec.py", line 16, in checksec print(pwndbg.wrappers.checksec.get_raw_out()) File "/root/pwndbg/pwndbg/commands/__init__.py", line 189, in _OnlyWithFile return function(*a, **kw) File "/root/pwndbg/pwndbg/wrappers/__init__.py", line 28, in _OnlyWithCommand return function(*a, **kw) File "/root/pwndbg/pwndbg/memoize.py", line 48, in __call__ value = self.func(*args, **kwargs) File "/root/pwndbg/pwndbg/wrappers/checksec.py", line 20, in get_raw_out return pwndbg.wrappers.call_cmd(cmd) File "/root/pwndbg/pwndbg/wrappers/__init__.py", line 35, in call_cmd return subprocess.check_output(cmd, stderr=STDOUT).decode('utf-8') File "/usr/lib/python3.7/subprocess.py", line 395, in check_output **kwargs).stdout File "/usr/lib/python3.7/subprocess.py", line 487, in run output=stdout, stderr=stderr) subprocess.CalledProcessError: Command '['/usr/local/bin/checksec', '--file', '/tmp/forgot']' returned non-zero exit status 2. ``` when checksec running in python3 pwntools environment, raise a failure. Here's the reason: ``` root@kali:/tmp# checksec --file forgot usage: checksec [-h] elf [elf ...] checksec: error: unrecognized arguments: --file ``` So modify pwndbg/pwndbg/wrappers/checksec.py: 19 with: `cmd = [get_raw_out.cmd_path, local_path]` Then it will be solved --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pwndbg/wrappers/checksec.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 from __future__ import absolute_import 4 from __future__ import division 5 from __future__ import print_function 6 from __future__ import unicode_literals 7 8 import pwndbg.commands 9 import pwndbg.memoize 10 import pwndbg.wrappers 11 12 cmd_name = "checksec" 13 14 @pwndbg.wrappers.OnlyWithCommand(cmd_name) 15 @pwndbg.memoize.reset_on_objfile 16 def get_raw_out(): 17 18 local_path = pwndbg.file.get_file(pwndbg.proc.exe) 19 cmd = [get_raw_out.cmd_path, "--file", local_path] 20 return pwndbg.wrappers.call_cmd(cmd) 21 22 @pwndbg.wrappers.OnlyWithCommand(cmd_name) 23 def relro_status(): 24 relro = "No RELRO" 25 out = get_raw_out() 26 27 if "Full RELRO" in out: 28 relro = "Full RELRO" 29 elif "Partial RELRO" in out: 30 relro = "Partial RELRO" 31 32 return relro 33 34 @pwndbg.wrappers.OnlyWithCommand(cmd_name) 35 def pie_status(): 36 pie = "No PIE" 37 out = get_raw_out() 38 39 if "PIE enabled" in out: 40 pie = "PIE enabled" 41 42 return pie 43 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pwndbg/wrappers/checksec.py b/pwndbg/wrappers/checksec.py --- a/pwndbg/wrappers/checksec.py +++ b/pwndbg/wrappers/checksec.py @@ -5,6 +5,10 @@ from __future__ import print_function from __future__ import unicode_literals +import subprocess +from re import search +from subprocess import STDOUT + import pwndbg.commands import pwndbg.memoize import pwndbg.wrappers @@ -14,10 +18,17 @@ @pwndbg.wrappers.OnlyWithCommand(cmd_name) @pwndbg.memoize.reset_on_objfile def get_raw_out(): - local_path = pwndbg.file.get_file(pwndbg.proc.exe) - cmd = [get_raw_out.cmd_path, "--file", local_path] - return pwndbg.wrappers.call_cmd(cmd) + try: + version_output = subprocess.check_output([get_raw_out.cmd_path, "--version"], stderr=STDOUT).decode('utf-8') + match = search('checksec v([\\w.]+),', version_output) + if match: + version = tuple(map(int, (match.group(1).split(".")))) + if version >= (2, 0): + return pwndbg.wrappers.call_cmd([get_raw_out.cmd_path, "--file=" + local_path]) + except Exception: + pass + return pwndbg.wrappers.call_cmd([get_raw_out.cmd_path, "--file", local_path]) @pwndbg.wrappers.OnlyWithCommand(cmd_name) def relro_status():
{"golden_diff": "diff --git a/pwndbg/wrappers/checksec.py b/pwndbg/wrappers/checksec.py\n--- a/pwndbg/wrappers/checksec.py\n+++ b/pwndbg/wrappers/checksec.py\n@@ -5,6 +5,10 @@\n from __future__ import print_function\n from __future__ import unicode_literals\n \n+import subprocess\n+from re import search\n+from subprocess import STDOUT\n+\n import pwndbg.commands\n import pwndbg.memoize\n import pwndbg.wrappers\n@@ -14,10 +18,17 @@\n @pwndbg.wrappers.OnlyWithCommand(cmd_name)\n @pwndbg.memoize.reset_on_objfile\n def get_raw_out():\n-\n local_path = pwndbg.file.get_file(pwndbg.proc.exe)\n- cmd = [get_raw_out.cmd_path, \"--file\", local_path]\n- return pwndbg.wrappers.call_cmd(cmd)\n+ try:\n+ version_output = subprocess.check_output([get_raw_out.cmd_path, \"--version\"], stderr=STDOUT).decode('utf-8')\n+ match = search('checksec v([\\\\w.]+),', version_output)\n+ if match:\n+ version = tuple(map(int, (match.group(1).split(\".\"))))\n+ if version >= (2, 0):\n+ return pwndbg.wrappers.call_cmd([get_raw_out.cmd_path, \"--file=\" + local_path])\n+ except Exception:\n+ pass\n+ return pwndbg.wrappers.call_cmd([get_raw_out.cmd_path, \"--file\", local_path])\n \n @pwndbg.wrappers.OnlyWithCommand(cmd_name)\n def relro_status():\n", "issue": "subprocess.CalledProcessError: Command '['/usr/local/bin/checksec', '--file', '/tmp/xxx']' returned non-zero exit status 2.\n```\r\npwndbg> checksec \r\nTraceback (most recent call last):\r\n File \"/root/pwndbg/pwndbg/commands/__init__.py\", line 109, in __call__\r\n return self.function(*args, **kwargs)\r\n File \"/root/pwndbg/pwndbg/commands/__init__.py\", line 189, in _OnlyWithFile\r\n return function(*a, **kw)\r\n File \"/root/pwndbg/pwndbg/commands/checksec.py\", line 16, in checksec\r\n print(pwndbg.wrappers.checksec.get_raw_out())\r\n File \"/root/pwndbg/pwndbg/commands/__init__.py\", line 189, in _OnlyWithFile\r\n return function(*a, **kw)\r\n File \"/root/pwndbg/pwndbg/wrappers/__init__.py\", line 28, in _OnlyWithCommand\r\n return function(*a, **kw)\r\n File \"/root/pwndbg/pwndbg/memoize.py\", line 48, in __call__\r\n value = self.func(*args, **kwargs)\r\n File \"/root/pwndbg/pwndbg/wrappers/checksec.py\", line 20, in get_raw_out\r\n return pwndbg.wrappers.call_cmd(cmd)\r\n File \"/root/pwndbg/pwndbg/wrappers/__init__.py\", line 35, in call_cmd\r\n return subprocess.check_output(cmd, stderr=STDOUT).decode('utf-8')\r\n File \"/usr/lib/python3.7/subprocess.py\", line 395, in check_output\r\n **kwargs).stdout\r\n File \"/usr/lib/python3.7/subprocess.py\", line 487, in run\r\n output=stdout, stderr=stderr)\r\nsubprocess.CalledProcessError: Command '['/usr/local/bin/checksec', '--file', '/tmp/forgot']' returned non-zero exit status 2.\r\n```\r\n\r\nwhen checksec running in python3 pwntools environment, raise a failure. 
Here's the reason:\r\n```\r\nroot@kali:/tmp# checksec --file forgot\r\nusage: checksec [-h] elf [elf ...]\r\nchecksec: error: unrecognized arguments: --file\r\n```\r\n\r\n\r\nSo modify pwndbg/pwndbg/wrappers/checksec.py: 19 with:\r\n`cmd = [get_raw_out.cmd_path, local_path]`\r\n\r\nThen it will be solved\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport pwndbg.commands\nimport pwndbg.memoize\nimport pwndbg.wrappers\n\ncmd_name = \"checksec\"\n\n@pwndbg.wrappers.OnlyWithCommand(cmd_name)\n@pwndbg.memoize.reset_on_objfile\ndef get_raw_out():\n\n local_path = pwndbg.file.get_file(pwndbg.proc.exe)\n cmd = [get_raw_out.cmd_path, \"--file\", local_path]\n return pwndbg.wrappers.call_cmd(cmd)\n\n@pwndbg.wrappers.OnlyWithCommand(cmd_name)\ndef relro_status():\n relro = \"No RELRO\"\n out = get_raw_out()\n\n if \"Full RELRO\" in out:\n relro = \"Full RELRO\"\n elif \"Partial RELRO\" in out:\n relro = \"Partial RELRO\"\n\n return relro\n\n@pwndbg.wrappers.OnlyWithCommand(cmd_name)\ndef pie_status():\n pie = \"No PIE\"\n out = get_raw_out()\n\n if \"PIE enabled\" in out:\n pie = \"PIE enabled\"\n\n return pie\n", "path": "pwndbg/wrappers/checksec.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport subprocess\nfrom re import search\nfrom subprocess import STDOUT\n\nimport pwndbg.commands\nimport pwndbg.memoize\nimport pwndbg.wrappers\n\ncmd_name = \"checksec\"\n\n@pwndbg.wrappers.OnlyWithCommand(cmd_name)\n@pwndbg.memoize.reset_on_objfile\ndef get_raw_out():\n local_path = pwndbg.file.get_file(pwndbg.proc.exe)\n try:\n version_output = subprocess.check_output([get_raw_out.cmd_path, \"--version\"], stderr=STDOUT).decode('utf-8')\n match = search('checksec v([\\\\w.]+),', version_output)\n if match:\n version = tuple(map(int, (match.group(1).split(\".\"))))\n if version >= (2, 0):\n return pwndbg.wrappers.call_cmd([get_raw_out.cmd_path, \"--file=\" + local_path])\n except Exception:\n pass\n return pwndbg.wrappers.call_cmd([get_raw_out.cmd_path, \"--file\", local_path])\n\n@pwndbg.wrappers.OnlyWithCommand(cmd_name)\ndef relro_status():\n relro = \"No RELRO\"\n out = get_raw_out()\n\n if \"Full RELRO\" in out:\n relro = \"Full RELRO\"\n elif \"Partial RELRO\" in out:\n relro = \"Partial RELRO\"\n\n return relro\n\n@pwndbg.wrappers.OnlyWithCommand(cmd_name)\ndef pie_status():\n pie = \"No PIE\"\n out = get_raw_out()\n\n if \"PIE enabled\" in out:\n pie = \"PIE enabled\"\n\n return pie\n", "path": "pwndbg/wrappers/checksec.py"}]}
1178
360
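The pwndbg patch closing this section branches on the installed checksec version, parsed from its `--version` banner, to choose between the older `--file <path>` arguments and the newer `--file=<path>` form. A standalone sketch of that parsing step, using the same regular expression as the golden diff; the helper name and the sample banner strings are illustrative:

```python
from re import search

def checksec_version(banner: str):
    # Mirrors the diff: extracts a tuple such as (2, 5, 0) from a banner
    # like "checksec v2.5.0, ..." and returns None when the banner does
    # not match the expected format.
    match = search(r"checksec v([\w.]+),", banner)
    if match:
        return tuple(map(int, match.group(1).split(".")))
    return None

assert checksec_version("checksec v2.5.0, example banner") == (2, 5, 0)
assert checksec_version("usage: checksec [-h] elf [elf ...]") is None
```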