| Column | Type | Length / value range |
|---|---|---|
| problem_id | string | lengths 18-22 |
| source | string | 1 distinct value |
| task_type | string | 1 distinct value |
| in_source_id | string | lengths 13-58 |
| prompt | string | lengths 1.1k-10.2k |
| golden_diff | string | lengths 151-4.94k |
| verification_info | string | lengths 582-21k |
| num_tokens | int64 | 271-2.05k |
| num_tokens_diff | int64 | 47-1.02k |
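For reference, below is a minimal sketch of loading this data with the Hugging Face `datasets` library. The repository id `rasdani/github-patches` is taken from the `source` field of the rows that follow, and the `train` split name is an assumption; the column names come from the schema above.

```python
from datasets import load_dataset

# Assumes the preview corresponds to the "rasdani/github-patches" dataset
# on the Hugging Face Hub and that it exposes a "train" split.
ds = load_dataset("rasdani/github-patches", split="train")

# Inspect one record using the columns listed in the schema table.
row = ds[0]
print(row["problem_id"], row["in_source_id"], row["num_tokens"])
```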
problem_id: gh_patches_debug_1193
source: rasdani/github-patches
task_type: git_diff
in_source_id: conda__conda-3740
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- conda env create giving ImportError for yaml package `conda env create` suddenly started giving `"ImportError: No module named 'yaml'"` with latest miniconda on my TravisCI builbs: https://travis-ci.org/leouieda/website/builds/170917743 I changed nothing significant in my code. Tried rebuilding previous passing builds and started getting the same error. Is this something from a recent release? conda env create giving ImportError for yaml package `conda env create` suddenly started giving `"ImportError: No module named 'yaml'"` with latest miniconda on my TravisCI builbs: https://travis-ci.org/leouieda/website/builds/170917743 I changed nothing significant in my code. Tried rebuilding previous passing builds and started getting the same error. Is this something from a recent release? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conda_env/yaml.py` Content: ``` 1 """ 2 Wrapper around yaml to ensure that everything is ordered correctly. 3 4 This is based on the answer at http://stackoverflow.com/a/16782282 5 """ 6 from __future__ import absolute_import, print_function 7 from collections import OrderedDict 8 import yaml 9 10 11 def represent_ordereddict(dumper, data): 12 value = [] 13 14 for item_key, item_value in data.items(): 15 node_key = dumper.represent_data(item_key) 16 node_value = dumper.represent_data(item_value) 17 18 value.append((node_key, node_value)) 19 20 return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value) 21 22 yaml.add_representer(OrderedDict, represent_ordereddict) 23 24 dump = yaml.dump 25 load = yaml.load 26 dict = OrderedDict 27 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conda_env/yaml.py b/conda_env/yaml.py --- a/conda_env/yaml.py +++ b/conda_env/yaml.py @@ -5,7 +5,9 @@ """ from __future__ import absolute_import, print_function from collections import OrderedDict -import yaml + +from conda.common.yaml import get_yaml +yaml = get_yaml() def represent_ordereddict(dumper, data):
{"golden_diff": "diff --git a/conda_env/yaml.py b/conda_env/yaml.py\n--- a/conda_env/yaml.py\n+++ b/conda_env/yaml.py\n@@ -5,7 +5,9 @@\n \"\"\"\n from __future__ import absolute_import, print_function\n from collections import OrderedDict\n-import yaml\n+\n+from conda.common.yaml import get_yaml\n+yaml = get_yaml()\n \n \n def represent_ordereddict(dumper, data):\n", "issue": "conda env create giving ImportError for yaml package\n`conda env create` suddenly started giving `\"ImportError: No module named 'yaml'\"` with latest miniconda on my TravisCI builbs: https://travis-ci.org/leouieda/website/builds/170917743\n\nI changed nothing significant in my code. Tried rebuilding previous passing builds and started getting the same error. \n\nIs this something from a recent release?\n\nconda env create giving ImportError for yaml package\n`conda env create` suddenly started giving `\"ImportError: No module named 'yaml'\"` with latest miniconda on my TravisCI builbs: https://travis-ci.org/leouieda/website/builds/170917743\n\nI changed nothing significant in my code. Tried rebuilding previous passing builds and started getting the same error. \n\nIs this something from a recent release?\n\n", "before_files": [{"content": "\"\"\"\nWrapper around yaml to ensure that everything is ordered correctly.\n\nThis is based on the answer at http://stackoverflow.com/a/16782282\n\"\"\"\nfrom __future__ import absolute_import, print_function\nfrom collections import OrderedDict\nimport yaml\n\n\ndef represent_ordereddict(dumper, data):\n value = []\n\n for item_key, item_value in data.items():\n node_key = dumper.represent_data(item_key)\n node_value = dumper.represent_data(item_value)\n\n value.append((node_key, node_value))\n\n return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)\n\nyaml.add_representer(OrderedDict, represent_ordereddict)\n\ndump = yaml.dump\nload = yaml.load\ndict = OrderedDict\n", "path": "conda_env/yaml.py"}], "after_files": [{"content": "\"\"\"\nWrapper around yaml to ensure that everything is ordered correctly.\n\nThis is based on the answer at http://stackoverflow.com/a/16782282\n\"\"\"\nfrom __future__ import absolute_import, print_function\nfrom collections import OrderedDict\n\nfrom conda.common.yaml import get_yaml\nyaml = get_yaml()\n\n\ndef represent_ordereddict(dumper, data):\n value = []\n\n for item_key, item_value in data.items():\n node_key = dumper.represent_data(item_key)\n node_value = dumper.represent_data(item_value)\n\n value.append((node_key, node_value))\n\n return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)\n\nyaml.add_representer(OrderedDict, represent_ordereddict)\n\ndump = yaml.dump\nload = yaml.load\ndict = OrderedDict\n", "path": "conda_env/yaml.py"}]}
num_tokens: 665
num_tokens_diff: 93

problem_id: gh_patches_debug_16516
source: rasdani/github-patches
task_type: git_diff
in_source_id: keras-team__autokeras-1164
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Multi-label classification `predict` method return probabilities rather than calibrated labels. ### Bug Description <!--- A clear and concise description of what the bug is. --> Multi-label classification `predict()` method return probabilities rather than calibrated labels. ### Bug Reproduction https://github.com/datamllab/automl-in-action-notebooks/blob/master/3.3.4-Task-API-Multi-label.ipynb ### Setup Details Include the details about the versions of: - OS type and version: - Python: - autokeras: 1.0.2 - keras-tuner: - scikit-learn: - numpy: - pandas: - tensorflow: 2.1.0 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `autokeras/adapters/output_adapter.py` Content: ``` 1 import numpy as np 2 import pandas as pd 3 import tensorflow as tf 4 5 from autokeras import encoders 6 from autokeras.engine import adapter as adapter_module 7 from autokeras.utils import data_utils 8 9 10 class HeadAdapter(adapter_module.Adapter): 11 12 def __init__(self, name, **kwargs): 13 super().__init__(**kwargs) 14 self.name = name 15 16 def check(self, dataset): 17 supported_types = (tf.data.Dataset, np.ndarray, pd.DataFrame, pd.Series) 18 if not isinstance(dataset, supported_types): 19 raise TypeError('Expect the target data of {name} to be tf.data.Dataset,' 20 ' np.ndarray, pd.DataFrame or pd.Series, but got {type}.' 21 .format(name=self.name, type=type(dataset))) 22 23 def convert_to_dataset(self, dataset): 24 if isinstance(dataset, np.ndarray): 25 if len(dataset.shape) == 1: 26 dataset = dataset.reshape(-1, 1) 27 if isinstance(dataset, pd.DataFrame): 28 dataset = dataset.values 29 if isinstance(dataset, pd.Series): 30 dataset = dataset.values.reshape(-1, 1) 31 return super().convert_to_dataset(dataset) 32 33 def postprocess(self, y): 34 """Postprocess the output of the Keras Model.""" 35 return y 36 37 def get_config(self): 38 config = super().get_config() 39 config.update({ 40 'name': self.name, 41 }) 42 return config 43 44 45 class ClassificationHeadAdapter(HeadAdapter): 46 47 def __init__(self, 48 num_classes=None, 49 multi_label=False, 50 **kwargs): 51 super().__init__(**kwargs) 52 self.num_classes = num_classes 53 self.label_encoder = None 54 self.multi_label = multi_label 55 56 def get_config(self): 57 config = super().get_config() 58 config.update({ 59 'encoder': encoders.serialize(self.label_encoder), 60 }) 61 return config 62 63 @classmethod 64 def from_config(cls, config): 65 obj = super().from_config(config) 66 obj.label_encoder = encoders.deserialize(config['encoder']) 67 68 def fit_before_convert(self, dataset): 69 """Fit the encoder.""" 70 # If in tf.data.Dataset, must be encoded already. 71 if isinstance(dataset, tf.data.Dataset): 72 return 73 74 # Convert the data to np.ndarray. 75 if isinstance(dataset, pd.DataFrame): 76 dataset = dataset.values 77 if isinstance(dataset, pd.Series): 78 dataset = dataset.values.reshape(-1, 1) 79 80 # If encoded. 81 if len(dataset.flatten()) != len(dataset): 82 if self.num_classes: 83 self._check_data_shape(dataset.shape[1:]) 84 return 85 86 # Fit encoder. 87 labels = set(dataset.flatten()) 88 if len(labels) < 2: 89 raise ValueError('Expect the target data for {name} to have ' 90 'at least 2 classes, but got {num_classes}.' 
91 .format(name=self.name, num_classes=self.num_classes)) 92 if len(labels) == 2 and not self.multi_label: 93 self.label_encoder = encoders.LabelEncoder() 94 else: 95 self.label_encoder = encoders.OneHotEncoder() 96 self.label_encoder.fit(dataset) 97 98 def convert_to_dataset(self, dataset): 99 if self.label_encoder: 100 dataset = self.label_encoder.encode(dataset) 101 return super().convert_to_dataset(dataset) 102 103 def fit(self, dataset): 104 super().fit(dataset) 105 shape = tuple(data_utils.dataset_shape(dataset).as_list()[1:]) 106 # Infer the num_classes. 107 if not self.num_classes: 108 # Single column with 0s and 1s. 109 if shape == (1,): 110 self.num_classes = 2 111 else: 112 self.num_classes = shape[0] 113 return 114 115 # Compute expected shape from num_classes. 116 if self.num_classes == 2 and not self.multi_label: 117 expected = (1,) 118 else: 119 expected = (self.num_classes,) 120 121 # Check shape equals expected shape. 122 if shape != expected: 123 raise ValueError('Expect the target data for {name} to have ' 124 'shape {expected}, but got {actual}.' 125 .format(name=self.name, expected=expected, 126 actual=shape)) 127 128 def postprocess(self, y): 129 if self.label_encoder: 130 y = self.label_encoder.decode(y) 131 return y 132 133 134 class RegressionHeadAdapter(HeadAdapter): 135 pass 136 137 138 class SegmentationHeadAdapter(ClassificationHeadAdapter): 139 pass 140 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/autokeras/adapters/output_adapter.py b/autokeras/adapters/output_adapter.py --- a/autokeras/adapters/output_adapter.py +++ b/autokeras/adapters/output_adapter.py @@ -78,6 +78,7 @@ dataset = dataset.values.reshape(-1, 1) # If encoded. + # TODO: support raw string labels for multi-label. if len(dataset.flatten()) != len(dataset): if self.num_classes: self._check_data_shape(dataset.shape[1:]) @@ -126,6 +127,9 @@ actual=shape)) def postprocess(self, y): + if self.multi_label: + y[y < 0.5] = 0 + y[y > 0.5] = 1 if self.label_encoder: y = self.label_encoder.decode(y) return y
{"golden_diff": "diff --git a/autokeras/adapters/output_adapter.py b/autokeras/adapters/output_adapter.py\n--- a/autokeras/adapters/output_adapter.py\n+++ b/autokeras/adapters/output_adapter.py\n@@ -78,6 +78,7 @@\n dataset = dataset.values.reshape(-1, 1)\n \n # If encoded.\n+ # TODO: support raw string labels for multi-label.\n if len(dataset.flatten()) != len(dataset):\n if self.num_classes:\n self._check_data_shape(dataset.shape[1:])\n@@ -126,6 +127,9 @@\n actual=shape))\n \n def postprocess(self, y):\n+ if self.multi_label:\n+ y[y < 0.5] = 0\n+ y[y > 0.5] = 1\n if self.label_encoder:\n y = self.label_encoder.decode(y)\n return y\n", "issue": "Multi-label classification `predict` method return probabilities rather than calibrated labels.\n### Bug Description\r\n<!---\r\nA clear and concise description of what the bug is.\r\n-->\r\n\r\nMulti-label classification `predict()` method return probabilities rather than calibrated labels.\r\n\r\n\r\n### Bug Reproduction\r\nhttps://github.com/datamllab/automl-in-action-notebooks/blob/master/3.3.4-Task-API-Multi-label.ipynb\r\n\r\n\r\n### Setup Details\r\nInclude the details about the versions of:\r\n - OS type and version:\r\n - Python: \r\n - autokeras: 1.0.2\r\n - keras-tuner:\r\n - scikit-learn:\r\n - numpy:\r\n - pandas:\r\n - tensorflow: 2.1.0\r\n\r\n\n", "before_files": [{"content": "import numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nfrom autokeras import encoders\nfrom autokeras.engine import adapter as adapter_module\nfrom autokeras.utils import data_utils\n\n\nclass HeadAdapter(adapter_module.Adapter):\n\n def __init__(self, name, **kwargs):\n super().__init__(**kwargs)\n self.name = name\n\n def check(self, dataset):\n supported_types = (tf.data.Dataset, np.ndarray, pd.DataFrame, pd.Series)\n if not isinstance(dataset, supported_types):\n raise TypeError('Expect the target data of {name} to be tf.data.Dataset,'\n ' np.ndarray, pd.DataFrame or pd.Series, but got {type}.'\n .format(name=self.name, type=type(dataset)))\n\n def convert_to_dataset(self, dataset):\n if isinstance(dataset, np.ndarray):\n if len(dataset.shape) == 1:\n dataset = dataset.reshape(-1, 1)\n if isinstance(dataset, pd.DataFrame):\n dataset = dataset.values\n if isinstance(dataset, pd.Series):\n dataset = dataset.values.reshape(-1, 1)\n return super().convert_to_dataset(dataset)\n\n def postprocess(self, y):\n \"\"\"Postprocess the output of the Keras Model.\"\"\"\n return y\n\n def get_config(self):\n config = super().get_config()\n config.update({\n 'name': self.name,\n })\n return config\n\n\nclass ClassificationHeadAdapter(HeadAdapter):\n\n def __init__(self,\n num_classes=None,\n multi_label=False,\n **kwargs):\n super().__init__(**kwargs)\n self.num_classes = num_classes\n self.label_encoder = None\n self.multi_label = multi_label\n\n def get_config(self):\n config = super().get_config()\n config.update({\n 'encoder': encoders.serialize(self.label_encoder),\n })\n return config\n\n @classmethod\n def from_config(cls, config):\n obj = super().from_config(config)\n obj.label_encoder = encoders.deserialize(config['encoder'])\n\n def fit_before_convert(self, dataset):\n \"\"\"Fit the encoder.\"\"\"\n # If in tf.data.Dataset, must be encoded already.\n if isinstance(dataset, tf.data.Dataset):\n return\n\n # Convert the data to np.ndarray.\n if isinstance(dataset, pd.DataFrame):\n dataset = dataset.values\n if isinstance(dataset, pd.Series):\n dataset = dataset.values.reshape(-1, 1)\n\n # If encoded.\n if len(dataset.flatten()) != len(dataset):\n if 
self.num_classes:\n self._check_data_shape(dataset.shape[1:])\n return\n\n # Fit encoder.\n labels = set(dataset.flatten())\n if len(labels) < 2:\n raise ValueError('Expect the target data for {name} to have '\n 'at least 2 classes, but got {num_classes}.'\n .format(name=self.name, num_classes=self.num_classes))\n if len(labels) == 2 and not self.multi_label:\n self.label_encoder = encoders.LabelEncoder()\n else:\n self.label_encoder = encoders.OneHotEncoder()\n self.label_encoder.fit(dataset)\n\n def convert_to_dataset(self, dataset):\n if self.label_encoder:\n dataset = self.label_encoder.encode(dataset)\n return super().convert_to_dataset(dataset)\n\n def fit(self, dataset):\n super().fit(dataset)\n shape = tuple(data_utils.dataset_shape(dataset).as_list()[1:])\n # Infer the num_classes.\n if not self.num_classes:\n # Single column with 0s and 1s.\n if shape == (1,):\n self.num_classes = 2\n else:\n self.num_classes = shape[0]\n return\n\n # Compute expected shape from num_classes.\n if self.num_classes == 2 and not self.multi_label:\n expected = (1,)\n else:\n expected = (self.num_classes,)\n\n # Check shape equals expected shape.\n if shape != expected:\n raise ValueError('Expect the target data for {name} to have '\n 'shape {expected}, but got {actual}.'\n .format(name=self.name, expected=expected,\n actual=shape))\n\n def postprocess(self, y):\n if self.label_encoder:\n y = self.label_encoder.decode(y)\n return y\n\n\nclass RegressionHeadAdapter(HeadAdapter):\n pass\n\n\nclass SegmentationHeadAdapter(ClassificationHeadAdapter):\n pass\n", "path": "autokeras/adapters/output_adapter.py"}], "after_files": [{"content": "import numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nfrom autokeras import encoders\nfrom autokeras.engine import adapter as adapter_module\nfrom autokeras.utils import data_utils\n\n\nclass HeadAdapter(adapter_module.Adapter):\n\n def __init__(self, name, **kwargs):\n super().__init__(**kwargs)\n self.name = name\n\n def check(self, dataset):\n supported_types = (tf.data.Dataset, np.ndarray, pd.DataFrame, pd.Series)\n if not isinstance(dataset, supported_types):\n raise TypeError('Expect the target data of {name} to be tf.data.Dataset,'\n ' np.ndarray, pd.DataFrame or pd.Series, but got {type}.'\n .format(name=self.name, type=type(dataset)))\n\n def convert_to_dataset(self, dataset):\n if isinstance(dataset, np.ndarray):\n if len(dataset.shape) == 1:\n dataset = dataset.reshape(-1, 1)\n if isinstance(dataset, pd.DataFrame):\n dataset = dataset.values\n if isinstance(dataset, pd.Series):\n dataset = dataset.values.reshape(-1, 1)\n return super().convert_to_dataset(dataset)\n\n def postprocess(self, y):\n \"\"\"Postprocess the output of the Keras Model.\"\"\"\n return y\n\n def get_config(self):\n config = super().get_config()\n config.update({\n 'name': self.name,\n })\n return config\n\n\nclass ClassificationHeadAdapter(HeadAdapter):\n\n def __init__(self,\n num_classes=None,\n multi_label=False,\n **kwargs):\n super().__init__(**kwargs)\n self.num_classes = num_classes\n self.label_encoder = None\n self.multi_label = multi_label\n\n def get_config(self):\n config = super().get_config()\n config.update({\n 'encoder': encoders.serialize(self.label_encoder),\n })\n return config\n\n @classmethod\n def from_config(cls, config):\n obj = super().from_config(config)\n obj.label_encoder = encoders.deserialize(config['encoder'])\n\n def fit_before_convert(self, dataset):\n \"\"\"Fit the encoder.\"\"\"\n # If in tf.data.Dataset, must be encoded already.\n if 
isinstance(dataset, tf.data.Dataset):\n return\n\n # Convert the data to np.ndarray.\n if isinstance(dataset, pd.DataFrame):\n dataset = dataset.values\n if isinstance(dataset, pd.Series):\n dataset = dataset.values.reshape(-1, 1)\n\n # If encoded.\n # TODO: support raw string labels for multi-label.\n if len(dataset.flatten()) != len(dataset):\n if self.num_classes:\n self._check_data_shape(dataset.shape[1:])\n return\n\n # Fit encoder.\n labels = set(dataset.flatten())\n if len(labels) < 2:\n raise ValueError('Expect the target data for {name} to have '\n 'at least 2 classes, but got {num_classes}.'\n .format(name=self.name, num_classes=self.num_classes))\n if len(labels) == 2 and not self.multi_label:\n self.label_encoder = encoders.LabelEncoder()\n else:\n self.label_encoder = encoders.OneHotEncoder()\n self.label_encoder.fit(dataset)\n\n def convert_to_dataset(self, dataset):\n if self.label_encoder:\n dataset = self.label_encoder.encode(dataset)\n return super().convert_to_dataset(dataset)\n\n def fit(self, dataset):\n super().fit(dataset)\n shape = tuple(data_utils.dataset_shape(dataset).as_list()[1:])\n # Infer the num_classes.\n if not self.num_classes:\n # Single column with 0s and 1s.\n if shape == (1,):\n self.num_classes = 2\n else:\n self.num_classes = shape[0]\n return\n\n # Compute expected shape from num_classes.\n if self.num_classes == 2 and not self.multi_label:\n expected = (1,)\n else:\n expected = (self.num_classes,)\n\n # Check shape equals expected shape.\n if shape != expected:\n raise ValueError('Expect the target data for {name} to have '\n 'shape {expected}, but got {actual}.'\n .format(name=self.name, expected=expected,\n actual=shape))\n\n def postprocess(self, y):\n if self.multi_label:\n y[y < 0.5] = 0\n y[y > 0.5] = 1\n if self.label_encoder:\n y = self.label_encoder.decode(y)\n return y\n\n\nclass RegressionHeadAdapter(HeadAdapter):\n pass\n\n\nclass SegmentationHeadAdapter(ClassificationHeadAdapter):\n pass\n", "path": "autokeras/adapters/output_adapter.py"}]}
num_tokens: 1,665
num_tokens_diff: 200

problem_id: gh_patches_debug_2868
source: rasdani/github-patches
task_type: git_diff
in_source_id: tobymao__sqlglot-2165
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spark raw String Support, comonly used with regexes This fails with sqlglot: ```python import sqlglot sql = """select regexp_replace('100-200', r'([^0-9])', '')""" sqlglot.parse_one(sql, read="databricks") ``` **Official Documentation** https://spark.apache.org/docs/latest/sql-ref-literals.html --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sqlglot/dialects/spark.py` Content: ``` 1 from __future__ import annotations 2 3 import typing as t 4 5 from sqlglot import exp 6 from sqlglot.dialects.dialect import rename_func 7 from sqlglot.dialects.spark2 import Spark2 8 from sqlglot.helper import seq_get 9 10 11 def _parse_datediff(args: t.List) -> exp.Expression: 12 """ 13 Although Spark docs don't mention the "unit" argument, Spark3 added support for 14 it at some point. Databricks also supports this variant (see below). 15 16 For example, in spark-sql (v3.3.1): 17 - SELECT DATEDIFF('2020-01-01', '2020-01-05') results in -4 18 - SELECT DATEDIFF(day, '2020-01-01', '2020-01-05') results in 4 19 20 See also: 21 - https://docs.databricks.com/sql/language-manual/functions/datediff3.html 22 - https://docs.databricks.com/sql/language-manual/functions/datediff.html 23 """ 24 unit = None 25 this = seq_get(args, 0) 26 expression = seq_get(args, 1) 27 28 if len(args) == 3: 29 unit = this 30 this = args[2] 31 32 return exp.DateDiff( 33 this=exp.TsOrDsToDate(this=this), expression=exp.TsOrDsToDate(this=expression), unit=unit 34 ) 35 36 37 class Spark(Spark2): 38 class Parser(Spark2.Parser): 39 FUNCTIONS = { 40 **Spark2.Parser.FUNCTIONS, 41 "ANY_VALUE": lambda args: exp.AnyValue( 42 this=seq_get(args, 0), ignore_nulls=seq_get(args, 1) 43 ), 44 "DATEDIFF": _parse_datediff, 45 } 46 47 FUNCTION_PARSERS = Spark2.Parser.FUNCTION_PARSERS.copy() 48 FUNCTION_PARSERS.pop("ANY_VALUE") 49 50 class Generator(Spark2.Generator): 51 TYPE_MAPPING = { 52 **Spark2.Generator.TYPE_MAPPING, 53 exp.DataType.Type.MONEY: "DECIMAL(15, 4)", 54 exp.DataType.Type.SMALLMONEY: "DECIMAL(6, 4)", 55 exp.DataType.Type.UNIQUEIDENTIFIER: "STRING", 56 } 57 58 TRANSFORMS = { 59 **Spark2.Generator.TRANSFORMS, 60 exp.StartsWith: rename_func("STARTSWITH"), 61 exp.TimestampAdd: lambda self, e: self.func( 62 "DATEADD", e.args.get("unit") or "DAY", e.expression, e.this 63 ), 64 } 65 TRANSFORMS.pop(exp.AnyValue) 66 TRANSFORMS.pop(exp.DateDiff) 67 TRANSFORMS.pop(exp.Group) 68 69 def anyvalue_sql(self, expression: exp.AnyValue) -> str: 70 return self.function_fallback_sql(expression) 71 72 def datediff_sql(self, expression: exp.DateDiff) -> str: 73 unit = self.sql(expression, "unit") 74 end = self.sql(expression, "this") 75 start = self.sql(expression, "expression") 76 77 if unit: 78 return self.func("DATEDIFF", unit, start, end) 79 80 return self.func("DATEDIFF", end, start) 81 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sqlglot/dialects/spark.py b/sqlglot/dialects/spark.py --- a/sqlglot/dialects/spark.py +++ b/sqlglot/dialects/spark.py @@ -35,6 +35,13 @@ class Spark(Spark2): + class Tokenizer(Spark2.Tokenizer): + RAW_STRINGS = [ + (prefix + q, q) + for q in t.cast(t.List[str], Spark2.Tokenizer.QUOTES) + for prefix in ("r", "R") + ] + class Parser(Spark2.Parser): FUNCTIONS = { **Spark2.Parser.FUNCTIONS,
{"golden_diff": "diff --git a/sqlglot/dialects/spark.py b/sqlglot/dialects/spark.py\n--- a/sqlglot/dialects/spark.py\n+++ b/sqlglot/dialects/spark.py\n@@ -35,6 +35,13 @@\n \n \n class Spark(Spark2):\n+ class Tokenizer(Spark2.Tokenizer):\n+ RAW_STRINGS = [\n+ (prefix + q, q)\n+ for q in t.cast(t.List[str], Spark2.Tokenizer.QUOTES)\n+ for prefix in (\"r\", \"R\")\n+ ]\n+\n class Parser(Spark2.Parser):\n FUNCTIONS = {\n **Spark2.Parser.FUNCTIONS,\n", "issue": "Spark raw String Support, comonly used with regexes\nThis fails with sqlglot:\r\n\r\n```python\r\nimport sqlglot\r\n\r\nsql = \"\"\"select regexp_replace('100-200', r'([^0-9])', '')\"\"\"\r\nsqlglot.parse_one(sql, read=\"databricks\")\r\n```\r\n\r\n**Official Documentation**\r\nhttps://spark.apache.org/docs/latest/sql-ref-literals.html\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp\nfrom sqlglot.dialects.dialect import rename_func\nfrom sqlglot.dialects.spark2 import Spark2\nfrom sqlglot.helper import seq_get\n\n\ndef _parse_datediff(args: t.List) -> exp.Expression:\n \"\"\"\n Although Spark docs don't mention the \"unit\" argument, Spark3 added support for\n it at some point. Databricks also supports this variant (see below).\n\n For example, in spark-sql (v3.3.1):\n - SELECT DATEDIFF('2020-01-01', '2020-01-05') results in -4\n - SELECT DATEDIFF(day, '2020-01-01', '2020-01-05') results in 4\n\n See also:\n - https://docs.databricks.com/sql/language-manual/functions/datediff3.html\n - https://docs.databricks.com/sql/language-manual/functions/datediff.html\n \"\"\"\n unit = None\n this = seq_get(args, 0)\n expression = seq_get(args, 1)\n\n if len(args) == 3:\n unit = this\n this = args[2]\n\n return exp.DateDiff(\n this=exp.TsOrDsToDate(this=this), expression=exp.TsOrDsToDate(this=expression), unit=unit\n )\n\n\nclass Spark(Spark2):\n class Parser(Spark2.Parser):\n FUNCTIONS = {\n **Spark2.Parser.FUNCTIONS,\n \"ANY_VALUE\": lambda args: exp.AnyValue(\n this=seq_get(args, 0), ignore_nulls=seq_get(args, 1)\n ),\n \"DATEDIFF\": _parse_datediff,\n }\n\n FUNCTION_PARSERS = Spark2.Parser.FUNCTION_PARSERS.copy()\n FUNCTION_PARSERS.pop(\"ANY_VALUE\")\n\n class Generator(Spark2.Generator):\n TYPE_MAPPING = {\n **Spark2.Generator.TYPE_MAPPING,\n exp.DataType.Type.MONEY: \"DECIMAL(15, 4)\",\n exp.DataType.Type.SMALLMONEY: \"DECIMAL(6, 4)\",\n exp.DataType.Type.UNIQUEIDENTIFIER: \"STRING\",\n }\n\n TRANSFORMS = {\n **Spark2.Generator.TRANSFORMS,\n exp.StartsWith: rename_func(\"STARTSWITH\"),\n exp.TimestampAdd: lambda self, e: self.func(\n \"DATEADD\", e.args.get(\"unit\") or \"DAY\", e.expression, e.this\n ),\n }\n TRANSFORMS.pop(exp.AnyValue)\n TRANSFORMS.pop(exp.DateDiff)\n TRANSFORMS.pop(exp.Group)\n\n def anyvalue_sql(self, expression: exp.AnyValue) -> str:\n return self.function_fallback_sql(expression)\n\n def datediff_sql(self, expression: exp.DateDiff) -> str:\n unit = self.sql(expression, \"unit\")\n end = self.sql(expression, \"this\")\n start = self.sql(expression, \"expression\")\n\n if unit:\n return self.func(\"DATEDIFF\", unit, start, end)\n\n return self.func(\"DATEDIFF\", end, start)\n", "path": "sqlglot/dialects/spark.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp\nfrom sqlglot.dialects.dialect import rename_func\nfrom sqlglot.dialects.spark2 import Spark2\nfrom sqlglot.helper import seq_get\n\n\ndef _parse_datediff(args: t.List) -> exp.Expression:\n \"\"\"\n Although Spark docs don't 
mention the \"unit\" argument, Spark3 added support for\n it at some point. Databricks also supports this variant (see below).\n\n For example, in spark-sql (v3.3.1):\n - SELECT DATEDIFF('2020-01-01', '2020-01-05') results in -4\n - SELECT DATEDIFF(day, '2020-01-01', '2020-01-05') results in 4\n\n See also:\n - https://docs.databricks.com/sql/language-manual/functions/datediff3.html\n - https://docs.databricks.com/sql/language-manual/functions/datediff.html\n \"\"\"\n unit = None\n this = seq_get(args, 0)\n expression = seq_get(args, 1)\n\n if len(args) == 3:\n unit = this\n this = args[2]\n\n return exp.DateDiff(\n this=exp.TsOrDsToDate(this=this), expression=exp.TsOrDsToDate(this=expression), unit=unit\n )\n\n\nclass Spark(Spark2):\n class Tokenizer(Spark2.Tokenizer):\n RAW_STRINGS = [\n (prefix + q, q)\n for q in t.cast(t.List[str], Spark2.Tokenizer.QUOTES)\n for prefix in (\"r\", \"R\")\n ]\n\n class Parser(Spark2.Parser):\n FUNCTIONS = {\n **Spark2.Parser.FUNCTIONS,\n \"ANY_VALUE\": lambda args: exp.AnyValue(\n this=seq_get(args, 0), ignore_nulls=seq_get(args, 1)\n ),\n \"DATEDIFF\": _parse_datediff,\n }\n\n FUNCTION_PARSERS = Spark2.Parser.FUNCTION_PARSERS.copy()\n FUNCTION_PARSERS.pop(\"ANY_VALUE\")\n\n class Generator(Spark2.Generator):\n TYPE_MAPPING = {\n **Spark2.Generator.TYPE_MAPPING,\n exp.DataType.Type.MONEY: \"DECIMAL(15, 4)\",\n exp.DataType.Type.SMALLMONEY: \"DECIMAL(6, 4)\",\n exp.DataType.Type.UNIQUEIDENTIFIER: \"STRING\",\n }\n\n TRANSFORMS = {\n **Spark2.Generator.TRANSFORMS,\n exp.StartsWith: rename_func(\"STARTSWITH\"),\n exp.TimestampAdd: lambda self, e: self.func(\n \"DATEADD\", e.args.get(\"unit\") or \"DAY\", e.expression, e.this\n ),\n }\n TRANSFORMS.pop(exp.AnyValue)\n TRANSFORMS.pop(exp.DateDiff)\n TRANSFORMS.pop(exp.Group)\n\n def anyvalue_sql(self, expression: exp.AnyValue) -> str:\n return self.function_fallback_sql(expression)\n\n def datediff_sql(self, expression: exp.DateDiff) -> str:\n unit = self.sql(expression, \"unit\")\n end = self.sql(expression, \"this\")\n start = self.sql(expression, \"expression\")\n\n if unit:\n return self.func(\"DATEDIFF\", unit, start, end)\n\n return self.func(\"DATEDIFF\", end, start)\n", "path": "sqlglot/dialects/spark.py"}]}
num_tokens: 1,204
num_tokens_diff: 150

problem_id: gh_patches_debug_22314
source: rasdani/github-patches
task_type: git_diff
in_source_id: learningequality__kolibri-5140
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- On (at least) one modal, dismissing with enter keypress causes it to come back ### Observed behavior This was seen with the privacy modal, and is in other places now: 1. Facility > Class Details > Rename Class 1. Privacy link on sign-in page 1. Privacy link on sign-up page 1. Profile page, change password modal 1. "Change" status button for Lessons It seems to be for the same reasons as #4973 (privacy modal), the "Edit" button maintains focus while the modal is on, so it gets "clicked" again when you dismiss the modal with an Enter keypress. The common thing between these two cases is that the privacy link and rename use the text-link versions of the Button. ### Expected behavior Text-type links do not have this side-effect when "clicked" by an enter-keypress ### Context Kolibri 0.12 latest --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kolibri/core/content/signals.py` Content: ``` 1 from django.db.models import F 2 from django.db.models.signals import pre_delete 3 from django.dispatch import receiver 4 5 from .models import ChannelMetadata 6 from .models import ContentNode 7 from kolibri.core.notifications.models import LearnerProgressNotification 8 9 10 @receiver(pre_delete, sender=ContentNode) 11 def cascade_delete_node(sender, instance=None, *args, **kwargs): 12 """ 13 For a given node, we delete all notifications 14 objects whose contentnode is the instance's node.. 15 """ 16 LearnerProgressNotification.objects.filter(contentnode_id=instance.id).delete() 17 18 19 @receiver(pre_delete, sender=ChannelMetadata) 20 def reorder_channels_upon_deletion(sender, instance=None, *args, **kwargs): 21 """ 22 For a given channel, decrement the order of all channels that come after this channel. 23 """ 24 ChannelMetadata.objects.filter(order__gt=instance.order).update(order=F('order') - 1) 25 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kolibri/core/content/signals.py b/kolibri/core/content/signals.py --- a/kolibri/core/content/signals.py +++ b/kolibri/core/content/signals.py @@ -5,6 +5,7 @@ from .models import ChannelMetadata from .models import ContentNode from kolibri.core.notifications.models import LearnerProgressNotification +from kolibri.core.lessons.models import Lesson @receiver(pre_delete, sender=ContentNode) @@ -22,3 +23,15 @@ For a given channel, decrement the order of all channels that come after this channel. """ ChannelMetadata.objects.filter(order__gt=instance.order).update(order=F('order') - 1) + + +@receiver(pre_delete, sender=ChannelMetadata) +def update_lesson_resources_before_delete(sender, instance=None, *args, **kwargs): + # Update the resources array of all lessons to ensure they don't have + # any deleted content + lessons = Lesson.objects.filter(resources__contains=instance.id) + for lesson in lessons: + updated_resources = [r for r in lesson.resources if r['channel_id'] != instance.id] + if len(updated_resources) < len(lesson.resources): + lesson.resources = updated_resources + lesson.save()
{"golden_diff": "diff --git a/kolibri/core/content/signals.py b/kolibri/core/content/signals.py\n--- a/kolibri/core/content/signals.py\n+++ b/kolibri/core/content/signals.py\n@@ -5,6 +5,7 @@\n from .models import ChannelMetadata\n from .models import ContentNode\n from kolibri.core.notifications.models import LearnerProgressNotification\n+from kolibri.core.lessons.models import Lesson\n \n \n @receiver(pre_delete, sender=ContentNode)\n@@ -22,3 +23,15 @@\n For a given channel, decrement the order of all channels that come after this channel.\n \"\"\"\n ChannelMetadata.objects.filter(order__gt=instance.order).update(order=F('order') - 1)\n+\n+\n+@receiver(pre_delete, sender=ChannelMetadata)\n+def update_lesson_resources_before_delete(sender, instance=None, *args, **kwargs):\n+ # Update the resources array of all lessons to ensure they don't have\n+ # any deleted content\n+ lessons = Lesson.objects.filter(resources__contains=instance.id)\n+ for lesson in lessons:\n+ updated_resources = [r for r in lesson.resources if r['channel_id'] != instance.id]\n+ if len(updated_resources) < len(lesson.resources):\n+ lesson.resources = updated_resources\n+ lesson.save()\n", "issue": "On (at least) one modal, dismissing with enter keypress causes it to come back\n### Observed behavior\r\n\r\nThis was seen with the privacy modal, and is in other places now:\r\n\r\n1. Facility > Class Details > Rename Class\r\n1. Privacy link on sign-in page\r\n1. Privacy link on sign-up page\r\n1. Profile page, change password modal\r\n1. \"Change\" status button for Lessons\r\n\r\nIt seems to be for the same reasons as #4973 (privacy modal), the \"Edit\" button maintains focus while the modal is on, so it gets \"clicked\" again when you dismiss the modal with an Enter keypress.\r\n\r\nThe common thing between these two cases is that the privacy link and rename use the text-link versions of the Button.\r\n\r\n### Expected behavior\r\n\r\nText-type links do not have this side-effect when \"clicked\" by an enter-keypress\r\n\r\n### Context\r\n\r\nKolibri 0.12 latest\n", "before_files": [{"content": "from django.db.models import F\nfrom django.db.models.signals import pre_delete\nfrom django.dispatch import receiver\n\nfrom .models import ChannelMetadata\nfrom .models import ContentNode\nfrom kolibri.core.notifications.models import LearnerProgressNotification\n\n\n@receiver(pre_delete, sender=ContentNode)\ndef cascade_delete_node(sender, instance=None, *args, **kwargs):\n \"\"\"\n For a given node, we delete all notifications\n objects whose contentnode is the instance's node..\n \"\"\"\n LearnerProgressNotification.objects.filter(contentnode_id=instance.id).delete()\n\n\n@receiver(pre_delete, sender=ChannelMetadata)\ndef reorder_channels_upon_deletion(sender, instance=None, *args, **kwargs):\n \"\"\"\n For a given channel, decrement the order of all channels that come after this channel.\n \"\"\"\n ChannelMetadata.objects.filter(order__gt=instance.order).update(order=F('order') - 1)\n", "path": "kolibri/core/content/signals.py"}], "after_files": [{"content": "from django.db.models import F\nfrom django.db.models.signals import pre_delete\nfrom django.dispatch import receiver\n\nfrom .models import ChannelMetadata\nfrom .models import ContentNode\nfrom kolibri.core.notifications.models import LearnerProgressNotification\nfrom kolibri.core.lessons.models import Lesson\n\n\n@receiver(pre_delete, sender=ContentNode)\ndef cascade_delete_node(sender, instance=None, *args, **kwargs):\n \"\"\"\n For a given node, we delete all 
notifications\n objects whose contentnode is the instance's node..\n \"\"\"\n LearnerProgressNotification.objects.filter(contentnode_id=instance.id).delete()\n\n\n@receiver(pre_delete, sender=ChannelMetadata)\ndef reorder_channels_upon_deletion(sender, instance=None, *args, **kwargs):\n \"\"\"\n For a given channel, decrement the order of all channels that come after this channel.\n \"\"\"\n ChannelMetadata.objects.filter(order__gt=instance.order).update(order=F('order') - 1)\n\n\n@receiver(pre_delete, sender=ChannelMetadata)\ndef update_lesson_resources_before_delete(sender, instance=None, *args, **kwargs):\n # Update the resources array of all lessons to ensure they don't have\n # any deleted content\n lessons = Lesson.objects.filter(resources__contains=instance.id)\n for lesson in lessons:\n updated_resources = [r for r in lesson.resources if r['channel_id'] != instance.id]\n if len(updated_resources) < len(lesson.resources):\n lesson.resources = updated_resources\n lesson.save()\n", "path": "kolibri/core/content/signals.py"}]}
num_tokens: 688
num_tokens_diff: 282

problem_id: gh_patches_debug_1463
source: rasdani/github-patches
task_type: git_diff
in_source_id: pre-commit__pre-commit-376
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Newly gitignored (but file still exists) files are linted (they should not be) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pre_commit/git.py` Content: ``` 1 from __future__ import unicode_literals 2 3 import functools 4 import logging 5 import os 6 import os.path 7 import re 8 9 from pre_commit.errors import FatalError 10 from pre_commit.util import CalledProcessError 11 from pre_commit.util import cmd_output 12 from pre_commit.util import memoize_by_cwd 13 14 15 logger = logging.getLogger('pre_commit') 16 17 18 def get_root(): 19 try: 20 return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip() 21 except CalledProcessError: 22 raise FatalError( 23 'Called from outside of the gits. Please cd to a git repository.' 24 ) 25 26 27 def get_git_dir(git_root): 28 return os.path.normpath(os.path.join( 29 git_root, 30 cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(), 31 )) 32 33 34 def is_in_merge_conflict(): 35 git_dir = get_git_dir('.') 36 return ( 37 os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and 38 os.path.exists(os.path.join(git_dir, 'MERGE_HEAD')) 39 ) 40 41 42 def parse_merge_msg_for_conflicts(merge_msg): 43 # Conflicted files start with tabs 44 return [ 45 line.lstrip('#').strip() 46 for line in merge_msg.splitlines() 47 # '#\t' for git 2.4.1 48 if line.startswith(('\t', '#\t')) 49 ] 50 51 52 @memoize_by_cwd 53 def get_conflicted_files(): 54 logger.info('Checking merge-conflict files only.') 55 # Need to get the conflicted files from the MERGE_MSG because they could 56 # have resolved the conflict by choosing one side or the other 57 merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read() 58 merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg) 59 60 # This will get the rest of the changes made after the merge. 61 # If they resolved the merge conflict by choosing a mesh of both sides 62 # this will also include the conflicted files 63 tree_hash = cmd_output('git', 'write-tree')[1].strip() 64 merge_diff_filenames = cmd_output( 65 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only', 66 )[1].splitlines() 67 return set(merge_conflict_filenames) | set(merge_diff_filenames) 68 69 70 @memoize_by_cwd 71 def get_staged_files(): 72 return cmd_output('git', 'diff', '--staged', '--name-only')[1].splitlines() 73 74 75 @memoize_by_cwd 76 def get_all_files(): 77 return cmd_output('git', 'ls-files')[1].splitlines() 78 79 80 def get_files_matching(all_file_list_strategy): 81 @functools.wraps(all_file_list_strategy) 82 @memoize_by_cwd 83 def wrapper(include_expr, exclude_expr): 84 include_regex = re.compile(include_expr) 85 exclude_regex = re.compile(exclude_expr) 86 return set( 87 filename 88 for filename in all_file_list_strategy() 89 if ( 90 include_regex.search(filename) and 91 not exclude_regex.search(filename) and 92 os.path.lexists(filename) 93 ) 94 ) 95 return wrapper 96 97 98 get_staged_files_matching = get_files_matching(get_staged_files) 99 get_all_files_matching = get_files_matching(get_all_files) 100 get_conflicted_files_matching = get_files_matching(get_conflicted_files) 101 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pre_commit/git.py b/pre_commit/git.py --- a/pre_commit/git.py +++ b/pre_commit/git.py @@ -69,7 +69,11 @@ @memoize_by_cwd def get_staged_files(): - return cmd_output('git', 'diff', '--staged', '--name-only')[1].splitlines() + return cmd_output( + 'git', 'diff', '--staged', '--name-only', + # Everything except for D + '--diff-filter=ACMRTUXB' + )[1].splitlines() @memoize_by_cwd
{"golden_diff": "diff --git a/pre_commit/git.py b/pre_commit/git.py\n--- a/pre_commit/git.py\n+++ b/pre_commit/git.py\n@@ -69,7 +69,11 @@\n \n @memoize_by_cwd\n def get_staged_files():\n- return cmd_output('git', 'diff', '--staged', '--name-only')[1].splitlines()\n+ return cmd_output(\n+ 'git', 'diff', '--staged', '--name-only',\n+ # Everything except for D\n+ '--diff-filter=ACMRTUXB'\n+ )[1].splitlines()\n \n \n @memoize_by_cwd\n", "issue": "Newly gitignored (but file still exists) files are linted\n(they should not be)\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport functools\nimport logging\nimport os\nimport os.path\nimport re\n\nfrom pre_commit.errors import FatalError\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import memoize_by_cwd\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef get_root():\n try:\n return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()\n except CalledProcessError:\n raise FatalError(\n 'Called from outside of the gits. Please cd to a git repository.'\n )\n\n\ndef get_git_dir(git_root):\n return os.path.normpath(os.path.join(\n git_root,\n cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),\n ))\n\n\ndef is_in_merge_conflict():\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n line.lstrip('#').strip()\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith(('\\t', '#\\t'))\n ]\n\n\n@memoize_by_cwd\ndef get_conflicted_files():\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = cmd_output(\n 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',\n )[1].splitlines()\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return cmd_output('git', 'diff', '--staged', '--name-only')[1].splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return cmd_output('git', 'ls-files')[1].splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(include_expr, exclude_expr):\n include_regex = re.compile(include_expr)\n exclude_regex = re.compile(exclude_expr)\n return set(\n filename\n for filename in all_file_list_strategy()\n if (\n include_regex.search(filename) and\n not exclude_regex.search(filename) and\n os.path.lexists(filename)\n )\n )\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\nget_conflicted_files_matching = get_files_matching(get_conflicted_files)\n", "path": "pre_commit/git.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport functools\nimport logging\nimport os\nimport 
os.path\nimport re\n\nfrom pre_commit.errors import FatalError\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import memoize_by_cwd\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef get_root():\n try:\n return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()\n except CalledProcessError:\n raise FatalError(\n 'Called from outside of the gits. Please cd to a git repository.'\n )\n\n\ndef get_git_dir(git_root):\n return os.path.normpath(os.path.join(\n git_root,\n cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),\n ))\n\n\ndef is_in_merge_conflict():\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n line.lstrip('#').strip()\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith(('\\t', '#\\t'))\n ]\n\n\n@memoize_by_cwd\ndef get_conflicted_files():\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = cmd_output(\n 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',\n )[1].splitlines()\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return cmd_output(\n 'git', 'diff', '--staged', '--name-only',\n # Everything except for D\n '--diff-filter=ACMRTUXB'\n )[1].splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return cmd_output('git', 'ls-files')[1].splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(include_expr, exclude_expr):\n include_regex = re.compile(include_expr)\n exclude_regex = re.compile(exclude_expr)\n return set(\n filename\n for filename in all_file_list_strategy()\n if (\n include_regex.search(filename) and\n not exclude_regex.search(filename) and\n os.path.lexists(filename)\n )\n )\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\nget_conflicted_files_matching = get_files_matching(get_conflicted_files)\n", "path": "pre_commit/git.py"}]}
num_tokens: 1,212
num_tokens_diff: 137

problem_id: gh_patches_debug_39152
source: rasdani/github-patches
task_type: git_diff
in_source_id: easybuilders__easybuild-easyblocks-2630
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- MUMmer easyblock corrupts binaries by running apply_regex_substitutions I ran into weird issues with a PR for a toolchain bump of the existing MUMmer easyconfig, see: https://github.com/easybuilders/easybuild-easyconfigs/pull/14474 The resulting executable doesn't work at all: ``` $ file ./mummer ./mummer: ERROR: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), statically linked error reading (Invalid argument) $ ./mummer Segmentation fault (core dumped) ``` After some debugging, I found that the installation step does a `apply_regex_substitutions` on all files in the build directory, including binaries: https://github.com/easybuilders/easybuild-easyblocks/blob/develop/easybuild/easyblocks/m/mummer.py#L75 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `easybuild/easyblocks/m/mummer.py` Content: ``` 1 ## 2 # Copyright 2009-2021 Ghent University 3 # 4 # This file is part of EasyBuild, 5 # originally created by the HPC team of the University of Ghent (http://ugent.be/hpc). 6 # 7 # https://github.com/easybuilders/easybuild 8 # 9 # EasyBuild is free software: you can redistribute it and/or modify 10 # it under the terms of the GNU General Public License as published by 11 # the Free Software Foundation v2. 12 # 13 # EasyBuild is distributed in the hope that it will be useful, 14 # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 # GNU General Public License for more details. 17 # 18 # You should have received a copy of the GNU General Public License 19 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. 
20 # 21 # This work implements a part of the HPCBIOS project and is a component of the policy: 22 # http://hpcbios.readthedocs.org/en/latest/HPCBIOS_2012-94.html 23 ## 24 """ 25 EasyBuild support for building and installing MUMmer, implemented as an easyblock 26 27 @author: Cedric Laczny (Uni.Lu) 28 @author: Fotis Georgatos (Uni.Lu) 29 @author: Kenneth Hoste (Ghent University) 30 @author: Jens Timmerman (Ghent University) 31 @author: Matt Lesko (NIH/NHGRI) 32 """ 33 import os 34 35 from easybuild.easyblocks.generic.configuremake import ConfigureMake 36 from easybuild.easyblocks.perl import get_major_perl_version 37 from easybuild.tools.filetools import apply_regex_substitutions, copy_file, mkdir 38 from easybuild.tools.run import run_cmd 39 40 41 class EB_MUMmer(ConfigureMake): 42 """Support for building and installing MUMmer (rapidly aligning entire genomes).""" 43 44 def __init__(self, *args, **kwargs): 45 """Define list of bin/aux_bin files.""" 46 47 super(EB_MUMmer, self).__init__(*args, **kwargs) 48 49 self.bin_files = [ 50 "mummer", "annotate", "combineMUMs", "delta-filter", "gaps", "mgaps", 51 "repeat-match", "show-aligns", "show-coords", "show-tiling", "show-snps", 52 "show-diff", "exact-tandems", "mapview", "mummerplot", "nucmer", "promer", 53 "run-mummer1", "run-mummer3", "nucmer2xfig", "dnadiff", 54 ] 55 self.script_files = ["Foundation.pm"] 56 self.aux_bin_files = ["postnuc", "postpro", "prenuc", "prepro"] 57 58 def configure_step(self): 59 """Configure MUMmer build by running make check and setting make options.""" 60 61 cmd = "%s make check %s" % (self.cfg['preconfigopts'], self.cfg['configopts']) 62 run_cmd(cmd, log_all=True, simple=True, log_output=True) 63 64 self.cfg.update('buildopts', 'all') 65 66 def install_step(self): 67 """Patch files to avoid use of build dir, install by copying files to install dir.""" 68 # patch build dir out of files, replace by install dir 69 pattern = r'%s' % self.cfg['start_dir'] 70 if pattern[-1] == os.path.sep: 71 pattern = pattern[:-1] 72 73 installdir_bin = os.path.join(self.installdir, 'bin') 74 75 for fil in [f for f in os.listdir(self.cfg['start_dir']) if os.path.isfile(f)]: 76 self.log.debug("Patching build dir out of %s, replacing by install bin dir)", fil) 77 apply_regex_substitutions(fil, [(pattern, installdir_bin)]) 78 79 # copy files to install dir 80 file_tuples = [ 81 (self.cfg['start_dir'], 'bin', self.bin_files), 82 (os.path.join(self.cfg['start_dir'], 'aux_bin'), os.path.join('bin', 'aux_bin'), self.aux_bin_files), 83 (os.path.join(self.cfg['start_dir'], 'scripts'), os.path.join('bin', 'scripts'), self.script_files), 84 ] 85 for srcdir, dest, files in file_tuples: 86 destdir = os.path.join(self.installdir, dest) 87 mkdir(destdir, parents=True) 88 for filename in files: 89 srcfile = os.path.join(srcdir, filename) 90 copy_file(srcfile, destdir) 91 92 def make_module_extra(self): 93 """Correctly prepend $PATH and $PERLXLIB for MUMmer.""" 94 # determine major version for Perl (e.g. '5'), required for e.g. 
$PERL5LIB 95 perlmajver = get_major_perl_version() 96 97 # set $PATH and $PERLXLIB correctly 98 txt = super(EB_MUMmer, self).make_module_extra() 99 txt += self.module_generator.prepend_paths("PATH", ['bin']) 100 txt += self.module_generator.prepend_paths("PATH", ['bin/aux_bin']) 101 txt += self.module_generator.prepend_paths("PERL%sLIB" % perlmajver, ['bin/scripts']) 102 return txt 103 104 def sanity_check_step(self): 105 """Custom sanity check for MUMmer.""" 106 107 custom_paths = { 108 'files': 109 ['bin/%s' % x for x in self.bin_files] + 110 ['bin/aux_bin/%s' % x for x in self.aux_bin_files] + 111 ['bin/scripts/%s' % x for x in self.script_files], 112 'dirs': [] 113 } 114 super(EB_MUMmer, self).sanity_check_step(custom_paths=custom_paths) 115 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/easybuild/easyblocks/m/mummer.py b/easybuild/easyblocks/m/mummer.py --- a/easybuild/easyblocks/m/mummer.py +++ b/easybuild/easyblocks/m/mummer.py @@ -34,7 +34,7 @@ from easybuild.easyblocks.generic.configuremake import ConfigureMake from easybuild.easyblocks.perl import get_major_perl_version -from easybuild.tools.filetools import apply_regex_substitutions, copy_file, mkdir +from easybuild.tools.filetools import apply_regex_substitutions, copy_file, is_binary, mkdir, read_file from easybuild.tools.run import run_cmd @@ -63,6 +63,14 @@ self.cfg.update('buildopts', 'all') + def build_step(self): + """Custom build procedure for MUMmer.""" + + # make sure that compiler options specified by EasyBuild are used + self.cfg.update('buildopts', 'CXXFLAGS="$CXXFLAGS" CFLAGS="$CFLAGS"') + + super(EB_MUMmer, self).build_step() + def install_step(self): """Patch files to avoid use of build dir, install by copying files to install dir.""" # patch build dir out of files, replace by install dir @@ -73,8 +81,11 @@ installdir_bin = os.path.join(self.installdir, 'bin') for fil in [f for f in os.listdir(self.cfg['start_dir']) if os.path.isfile(f)]: - self.log.debug("Patching build dir out of %s, replacing by install bin dir)", fil) - apply_regex_substitutions(fil, [(pattern, installdir_bin)]) + # only use apply_regex_substitutions() on non-binary files + # for more details, see https://github.com/easybuilders/easybuild-easyblocks/issues/2629) + if not is_binary(read_file(fil, mode='rb')): + self.log.debug("Patching build dir out of %s, replacing by install bin dir)", fil) + apply_regex_substitutions(fil, [(pattern, installdir_bin)]) # copy files to install dir file_tuples = [ @@ -96,7 +107,6 @@ # set $PATH and $PERLXLIB correctly txt = super(EB_MUMmer, self).make_module_extra() - txt += self.module_generator.prepend_paths("PATH", ['bin']) txt += self.module_generator.prepend_paths("PATH", ['bin/aux_bin']) txt += self.module_generator.prepend_paths("PERL%sLIB" % perlmajver, ['bin/scripts']) return txt @@ -111,4 +121,7 @@ ['bin/scripts/%s' % x for x in self.script_files], 'dirs': [] } - super(EB_MUMmer, self).sanity_check_step(custom_paths=custom_paths) + + custom_commands = ["mummer -h"] + + super(EB_MUMmer, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)
{"golden_diff": "diff --git a/easybuild/easyblocks/m/mummer.py b/easybuild/easyblocks/m/mummer.py\n--- a/easybuild/easyblocks/m/mummer.py\n+++ b/easybuild/easyblocks/m/mummer.py\n@@ -34,7 +34,7 @@\n \n from easybuild.easyblocks.generic.configuremake import ConfigureMake\n from easybuild.easyblocks.perl import get_major_perl_version\n-from easybuild.tools.filetools import apply_regex_substitutions, copy_file, mkdir\n+from easybuild.tools.filetools import apply_regex_substitutions, copy_file, is_binary, mkdir, read_file\n from easybuild.tools.run import run_cmd\n \n \n@@ -63,6 +63,14 @@\n \n self.cfg.update('buildopts', 'all')\n \n+ def build_step(self):\n+ \"\"\"Custom build procedure for MUMmer.\"\"\"\n+\n+ # make sure that compiler options specified by EasyBuild are used\n+ self.cfg.update('buildopts', 'CXXFLAGS=\"$CXXFLAGS\" CFLAGS=\"$CFLAGS\"')\n+\n+ super(EB_MUMmer, self).build_step()\n+\n def install_step(self):\n \"\"\"Patch files to avoid use of build dir, install by copying files to install dir.\"\"\"\n # patch build dir out of files, replace by install dir\n@@ -73,8 +81,11 @@\n installdir_bin = os.path.join(self.installdir, 'bin')\n \n for fil in [f for f in os.listdir(self.cfg['start_dir']) if os.path.isfile(f)]:\n- self.log.debug(\"Patching build dir out of %s, replacing by install bin dir)\", fil)\n- apply_regex_substitutions(fil, [(pattern, installdir_bin)])\n+ # only use apply_regex_substitutions() on non-binary files\n+ # for more details, see https://github.com/easybuilders/easybuild-easyblocks/issues/2629)\n+ if not is_binary(read_file(fil, mode='rb')):\n+ self.log.debug(\"Patching build dir out of %s, replacing by install bin dir)\", fil)\n+ apply_regex_substitutions(fil, [(pattern, installdir_bin)])\n \n # copy files to install dir\n file_tuples = [\n@@ -96,7 +107,6 @@\n \n # set $PATH and $PERLXLIB correctly\n txt = super(EB_MUMmer, self).make_module_extra()\n- txt += self.module_generator.prepend_paths(\"PATH\", ['bin'])\n txt += self.module_generator.prepend_paths(\"PATH\", ['bin/aux_bin'])\n txt += self.module_generator.prepend_paths(\"PERL%sLIB\" % perlmajver, ['bin/scripts'])\n return txt\n@@ -111,4 +121,7 @@\n ['bin/scripts/%s' % x for x in self.script_files],\n 'dirs': []\n }\n- super(EB_MUMmer, self).sanity_check_step(custom_paths=custom_paths)\n+\n+ custom_commands = [\"mummer -h\"]\n+\n+ super(EB_MUMmer, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)\n", "issue": "MUMmer easyblock corrupts binaries by running apply_regex_substitutions\nI ran into weird issues with a PR for a toolchain bump of the existing MUMmer easyconfig, see:\r\nhttps://github.com/easybuilders/easybuild-easyconfigs/pull/14474\r\n\r\nThe resulting executable doesn't work at all:\r\n```\r\n$ file ./mummer\r\n./mummer: ERROR: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), statically linked error reading (Invalid argument)\r\n\r\n$ ./mummer\r\nSegmentation fault (core dumped)\r\n```\r\n\r\nAfter some debugging, I found that the installation step does a `apply_regex_substitutions` on all files in the build directory, including binaries:\r\nhttps://github.com/easybuilders/easybuild-easyblocks/blob/develop/easybuild/easyblocks/m/mummer.py#L75\r\n\n", "before_files": [{"content": "##\n# Copyright 2009-2021 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of the University of Ghent (http://ugent.be/hpc).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it 
and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n#\n# This work implements a part of the HPCBIOS project and is a component of the policy:\n# http://hpcbios.readthedocs.org/en/latest/HPCBIOS_2012-94.html\n##\n\"\"\"\nEasyBuild support for building and installing MUMmer, implemented as an easyblock\n\n@author: Cedric Laczny (Uni.Lu)\n@author: Fotis Georgatos (Uni.Lu)\n@author: Kenneth Hoste (Ghent University)\n@author: Jens Timmerman (Ghent University)\n@author: Matt Lesko (NIH/NHGRI)\n\"\"\"\nimport os\n\nfrom easybuild.easyblocks.generic.configuremake import ConfigureMake\nfrom easybuild.easyblocks.perl import get_major_perl_version\nfrom easybuild.tools.filetools import apply_regex_substitutions, copy_file, mkdir\nfrom easybuild.tools.run import run_cmd\n\n\nclass EB_MUMmer(ConfigureMake):\n \"\"\"Support for building and installing MUMmer (rapidly aligning entire genomes).\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Define list of bin/aux_bin files.\"\"\"\n\n super(EB_MUMmer, self).__init__(*args, **kwargs)\n\n self.bin_files = [\n \"mummer\", \"annotate\", \"combineMUMs\", \"delta-filter\", \"gaps\", \"mgaps\",\n \"repeat-match\", \"show-aligns\", \"show-coords\", \"show-tiling\", \"show-snps\",\n \"show-diff\", \"exact-tandems\", \"mapview\", \"mummerplot\", \"nucmer\", \"promer\",\n \"run-mummer1\", \"run-mummer3\", \"nucmer2xfig\", \"dnadiff\",\n ]\n self.script_files = [\"Foundation.pm\"]\n self.aux_bin_files = [\"postnuc\", \"postpro\", \"prenuc\", \"prepro\"]\n\n def configure_step(self):\n \"\"\"Configure MUMmer build by running make check and setting make options.\"\"\"\n\n cmd = \"%s make check %s\" % (self.cfg['preconfigopts'], self.cfg['configopts'])\n run_cmd(cmd, log_all=True, simple=True, log_output=True)\n\n self.cfg.update('buildopts', 'all')\n\n def install_step(self):\n \"\"\"Patch files to avoid use of build dir, install by copying files to install dir.\"\"\"\n # patch build dir out of files, replace by install dir\n pattern = r'%s' % self.cfg['start_dir']\n if pattern[-1] == os.path.sep:\n pattern = pattern[:-1]\n\n installdir_bin = os.path.join(self.installdir, 'bin')\n\n for fil in [f for f in os.listdir(self.cfg['start_dir']) if os.path.isfile(f)]:\n self.log.debug(\"Patching build dir out of %s, replacing by install bin dir)\", fil)\n apply_regex_substitutions(fil, [(pattern, installdir_bin)])\n\n # copy files to install dir\n file_tuples = [\n (self.cfg['start_dir'], 'bin', self.bin_files),\n (os.path.join(self.cfg['start_dir'], 'aux_bin'), os.path.join('bin', 'aux_bin'), self.aux_bin_files),\n (os.path.join(self.cfg['start_dir'], 'scripts'), os.path.join('bin', 'scripts'), self.script_files),\n ]\n for srcdir, dest, files in file_tuples:\n destdir = os.path.join(self.installdir, dest)\n mkdir(destdir, parents=True)\n for filename in files:\n srcfile = os.path.join(srcdir, filename)\n copy_file(srcfile, destdir)\n\n def make_module_extra(self):\n \"\"\"Correctly prepend $PATH and $PERLXLIB for MUMmer.\"\"\"\n # determine major version for Perl (e.g. '5'), required for e.g. 
$PERL5LIB\n perlmajver = get_major_perl_version()\n\n # set $PATH and $PERLXLIB correctly\n txt = super(EB_MUMmer, self).make_module_extra()\n txt += self.module_generator.prepend_paths(\"PATH\", ['bin'])\n txt += self.module_generator.prepend_paths(\"PATH\", ['bin/aux_bin'])\n txt += self.module_generator.prepend_paths(\"PERL%sLIB\" % perlmajver, ['bin/scripts'])\n return txt\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for MUMmer.\"\"\"\n\n custom_paths = {\n 'files':\n ['bin/%s' % x for x in self.bin_files] +\n ['bin/aux_bin/%s' % x for x in self.aux_bin_files] +\n ['bin/scripts/%s' % x for x in self.script_files],\n 'dirs': []\n }\n super(EB_MUMmer, self).sanity_check_step(custom_paths=custom_paths)\n", "path": "easybuild/easyblocks/m/mummer.py"}], "after_files": [{"content": "##\n# Copyright 2009-2021 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of the University of Ghent (http://ugent.be/hpc).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n#\n# This work implements a part of the HPCBIOS project and is a component of the policy:\n# http://hpcbios.readthedocs.org/en/latest/HPCBIOS_2012-94.html\n##\n\"\"\"\nEasyBuild support for building and installing MUMmer, implemented as an easyblock\n\n@author: Cedric Laczny (Uni.Lu)\n@author: Fotis Georgatos (Uni.Lu)\n@author: Kenneth Hoste (Ghent University)\n@author: Jens Timmerman (Ghent University)\n@author: Matt Lesko (NIH/NHGRI)\n\"\"\"\nimport os\n\nfrom easybuild.easyblocks.generic.configuremake import ConfigureMake\nfrom easybuild.easyblocks.perl import get_major_perl_version\nfrom easybuild.tools.filetools import apply_regex_substitutions, copy_file, is_binary, mkdir, read_file\nfrom easybuild.tools.run import run_cmd\n\n\nclass EB_MUMmer(ConfigureMake):\n \"\"\"Support for building and installing MUMmer (rapidly aligning entire genomes).\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Define list of bin/aux_bin files.\"\"\"\n\n super(EB_MUMmer, self).__init__(*args, **kwargs)\n\n self.bin_files = [\n \"mummer\", \"annotate\", \"combineMUMs\", \"delta-filter\", \"gaps\", \"mgaps\",\n \"repeat-match\", \"show-aligns\", \"show-coords\", \"show-tiling\", \"show-snps\",\n \"show-diff\", \"exact-tandems\", \"mapview\", \"mummerplot\", \"nucmer\", \"promer\",\n \"run-mummer1\", \"run-mummer3\", \"nucmer2xfig\", \"dnadiff\",\n ]\n self.script_files = [\"Foundation.pm\"]\n self.aux_bin_files = [\"postnuc\", \"postpro\", \"prenuc\", \"prepro\"]\n\n def configure_step(self):\n \"\"\"Configure MUMmer build by running make check and setting make options.\"\"\"\n\n cmd = \"%s make check %s\" % (self.cfg['preconfigopts'], self.cfg['configopts'])\n run_cmd(cmd, log_all=True, simple=True, log_output=True)\n\n self.cfg.update('buildopts', 'all')\n\n def build_step(self):\n \"\"\"Custom build procedure for MUMmer.\"\"\"\n\n # make sure that compiler options specified by EasyBuild are used\n self.cfg.update('buildopts', 
'CXXFLAGS=\"$CXXFLAGS\" CFLAGS=\"$CFLAGS\"')\n\n super(EB_MUMmer, self).build_step()\n\n def install_step(self):\n \"\"\"Patch files to avoid use of build dir, install by copying files to install dir.\"\"\"\n # patch build dir out of files, replace by install dir\n pattern = r'%s' % self.cfg['start_dir']\n if pattern[-1] == os.path.sep:\n pattern = pattern[:-1]\n\n installdir_bin = os.path.join(self.installdir, 'bin')\n\n for fil in [f for f in os.listdir(self.cfg['start_dir']) if os.path.isfile(f)]:\n # only use apply_regex_substitutions() on non-binary files\n # for more details, see https://github.com/easybuilders/easybuild-easyblocks/issues/2629)\n if not is_binary(read_file(fil, mode='rb')):\n self.log.debug(\"Patching build dir out of %s, replacing by install bin dir)\", fil)\n apply_regex_substitutions(fil, [(pattern, installdir_bin)])\n\n # copy files to install dir\n file_tuples = [\n (self.cfg['start_dir'], 'bin', self.bin_files),\n (os.path.join(self.cfg['start_dir'], 'aux_bin'), os.path.join('bin', 'aux_bin'), self.aux_bin_files),\n (os.path.join(self.cfg['start_dir'], 'scripts'), os.path.join('bin', 'scripts'), self.script_files),\n ]\n for srcdir, dest, files in file_tuples:\n destdir = os.path.join(self.installdir, dest)\n mkdir(destdir, parents=True)\n for filename in files:\n srcfile = os.path.join(srcdir, filename)\n copy_file(srcfile, destdir)\n\n def make_module_extra(self):\n \"\"\"Correctly prepend $PATH and $PERLXLIB for MUMmer.\"\"\"\n # determine major version for Perl (e.g. '5'), required for e.g. $PERL5LIB\n perlmajver = get_major_perl_version()\n\n # set $PATH and $PERLXLIB correctly\n txt = super(EB_MUMmer, self).make_module_extra()\n txt += self.module_generator.prepend_paths(\"PATH\", ['bin/aux_bin'])\n txt += self.module_generator.prepend_paths(\"PERL%sLIB\" % perlmajver, ['bin/scripts'])\n return txt\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for MUMmer.\"\"\"\n\n custom_paths = {\n 'files':\n ['bin/%s' % x for x in self.bin_files] +\n ['bin/aux_bin/%s' % x for x in self.aux_bin_files] +\n ['bin/scripts/%s' % x for x in self.script_files],\n 'dirs': []\n }\n\n custom_commands = [\"mummer -h\"]\n\n super(EB_MUMmer, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)\n", "path": "easybuild/easyblocks/m/mummer.py"}]}
1,931
688
gh_patches_debug_14135
rasdani/github-patches
git_diff
OpenNMT__OpenNMT-py-1206
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ImportError for get_rank function While running the **preprocess.py** I am getting an ImportError of get_rank function from Pytorch. What I have found on the internet that it is now deprecated and is inside the deprecated module. I can send a pull request for a fix of that? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `onmt/utils/statistics.py` Content: ``` 1 """ Statistics calculation utility """ 2 from __future__ import division 3 import time 4 import math 5 import sys 6 7 from torch.distributed import get_rank 8 from onmt.utils.distributed import all_gather_list 9 from onmt.utils.logging import logger 10 11 12 class Statistics(object): 13 """ 14 Accumulator for loss statistics. 15 Currently calculates: 16 17 * accuracy 18 * perplexity 19 * elapsed time 20 """ 21 22 def __init__(self, loss=0, n_words=0, n_correct=0): 23 self.loss = loss 24 self.n_words = n_words 25 self.n_correct = n_correct 26 self.n_src_words = 0 27 self.start_time = time.time() 28 29 @staticmethod 30 def all_gather_stats(stat, max_size=4096): 31 """ 32 Gather a `Statistics` object accross multiple process/nodes 33 34 Args: 35 stat(:obj:Statistics): the statistics object to gather 36 accross all processes/nodes 37 max_size(int): max buffer size to use 38 39 Returns: 40 `Statistics`, the update stats object 41 """ 42 stats = Statistics.all_gather_stats_list([stat], max_size=max_size) 43 return stats[0] 44 45 @staticmethod 46 def all_gather_stats_list(stat_list, max_size=4096): 47 """ 48 Gather a `Statistics` list accross all processes/nodes 49 50 Args: 51 stat_list(list([`Statistics`])): list of statistics objects to 52 gather accross all processes/nodes 53 max_size(int): max buffer size to use 54 55 Returns: 56 our_stats(list([`Statistics`])): list of updated stats 57 """ 58 # Get a list of world_size lists with len(stat_list) Statistics objects 59 all_stats = all_gather_list(stat_list, max_size=max_size) 60 61 our_rank = get_rank() 62 our_stats = all_stats[our_rank] 63 for other_rank, stats in enumerate(all_stats): 64 if other_rank == our_rank: 65 continue 66 for i, stat in enumerate(stats): 67 our_stats[i].update(stat, update_n_src_words=True) 68 return our_stats 69 70 def update(self, stat, update_n_src_words=False): 71 """ 72 Update statistics by suming values with another `Statistics` object 73 74 Args: 75 stat: another statistic object 76 update_n_src_words(bool): whether to update (sum) `n_src_words` 77 or not 78 79 """ 80 self.loss += stat.loss 81 self.n_words += stat.n_words 82 self.n_correct += stat.n_correct 83 84 if update_n_src_words: 85 self.n_src_words += stat.n_src_words 86 87 def accuracy(self): 88 """ compute accuracy """ 89 return 100 * (self.n_correct / self.n_words) 90 91 def xent(self): 92 """ compute cross entropy """ 93 return self.loss / self.n_words 94 95 def ppl(self): 96 """ compute perplexity """ 97 return math.exp(min(self.loss / self.n_words, 100)) 98 99 def elapsed_time(self): 100 """ compute elapsed time """ 101 return time.time() - self.start_time 102 103 def output(self, step, num_steps, learning_rate, start): 104 """Write out statistics to stdout. 105 106 Args: 107 step (int): current step 108 n_batch (int): total batches 109 start (int): start time of step. 
110 """ 111 t = self.elapsed_time() 112 logger.info( 113 ("Step %2d/%5d; acc: %6.2f; ppl: %5.2f; xent: %4.2f; " + 114 "lr: %7.5f; %3.0f/%3.0f tok/s; %6.0f sec") 115 % (step, num_steps, 116 self.accuracy(), 117 self.ppl(), 118 self.xent(), 119 learning_rate, 120 self.n_src_words / (t + 1e-5), 121 self.n_words / (t + 1e-5), 122 time.time() - start)) 123 sys.stdout.flush() 124 125 def log_tensorboard(self, prefix, writer, learning_rate, step): 126 """ display statistics to tensorboard """ 127 t = self.elapsed_time() 128 writer.add_scalar(prefix + "/xent", self.xent(), step) 129 writer.add_scalar(prefix + "/ppl", self.ppl(), step) 130 writer.add_scalar(prefix + "/accuracy", self.accuracy(), step) 131 writer.add_scalar(prefix + "/tgtper", self.n_words / t, step) 132 writer.add_scalar(prefix + "/lr", learning_rate, step) 133 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/onmt/utils/statistics.py b/onmt/utils/statistics.py --- a/onmt/utils/statistics.py +++ b/onmt/utils/statistics.py @@ -4,8 +4,6 @@ import math import sys -from torch.distributed import get_rank -from onmt.utils.distributed import all_gather_list from onmt.utils.logging import logger @@ -55,6 +53,9 @@ Returns: our_stats(list([`Statistics`])): list of updated stats """ + from torch.distributed import get_rank + from onmt.utils.distributed import all_gather_list + # Get a list of world_size lists with len(stat_list) Statistics objects all_stats = all_gather_list(stat_list, max_size=max_size)
{"golden_diff": "diff --git a/onmt/utils/statistics.py b/onmt/utils/statistics.py\n--- a/onmt/utils/statistics.py\n+++ b/onmt/utils/statistics.py\n@@ -4,8 +4,6 @@\n import math\n import sys\n \n-from torch.distributed import get_rank\n-from onmt.utils.distributed import all_gather_list\n from onmt.utils.logging import logger\n \n \n@@ -55,6 +53,9 @@\n Returns:\n our_stats(list([`Statistics`])): list of updated stats\n \"\"\"\n+ from torch.distributed import get_rank\n+ from onmt.utils.distributed import all_gather_list\n+\n # Get a list of world_size lists with len(stat_list) Statistics objects\n all_stats = all_gather_list(stat_list, max_size=max_size)\n", "issue": "ImportError for get_rank function\nWhile running the **preprocess.py** I am getting an ImportError of get_rank function from Pytorch. What I have found on the internet that it is now deprecated and is inside the deprecated module. I can send a pull request for a fix of that?\n", "before_files": [{"content": "\"\"\" Statistics calculation utility \"\"\"\nfrom __future__ import division\nimport time\nimport math\nimport sys\n\nfrom torch.distributed import get_rank\nfrom onmt.utils.distributed import all_gather_list\nfrom onmt.utils.logging import logger\n\n\nclass Statistics(object):\n \"\"\"\n Accumulator for loss statistics.\n Currently calculates:\n\n * accuracy\n * perplexity\n * elapsed time\n \"\"\"\n\n def __init__(self, loss=0, n_words=0, n_correct=0):\n self.loss = loss\n self.n_words = n_words\n self.n_correct = n_correct\n self.n_src_words = 0\n self.start_time = time.time()\n\n @staticmethod\n def all_gather_stats(stat, max_size=4096):\n \"\"\"\n Gather a `Statistics` object accross multiple process/nodes\n\n Args:\n stat(:obj:Statistics): the statistics object to gather\n accross all processes/nodes\n max_size(int): max buffer size to use\n\n Returns:\n `Statistics`, the update stats object\n \"\"\"\n stats = Statistics.all_gather_stats_list([stat], max_size=max_size)\n return stats[0]\n\n @staticmethod\n def all_gather_stats_list(stat_list, max_size=4096):\n \"\"\"\n Gather a `Statistics` list accross all processes/nodes\n\n Args:\n stat_list(list([`Statistics`])): list of statistics objects to\n gather accross all processes/nodes\n max_size(int): max buffer size to use\n\n Returns:\n our_stats(list([`Statistics`])): list of updated stats\n \"\"\"\n # Get a list of world_size lists with len(stat_list) Statistics objects\n all_stats = all_gather_list(stat_list, max_size=max_size)\n\n our_rank = get_rank()\n our_stats = all_stats[our_rank]\n for other_rank, stats in enumerate(all_stats):\n if other_rank == our_rank:\n continue\n for i, stat in enumerate(stats):\n our_stats[i].update(stat, update_n_src_words=True)\n return our_stats\n\n def update(self, stat, update_n_src_words=False):\n \"\"\"\n Update statistics by suming values with another `Statistics` object\n\n Args:\n stat: another statistic object\n update_n_src_words(bool): whether to update (sum) `n_src_words`\n or not\n\n \"\"\"\n self.loss += stat.loss\n self.n_words += stat.n_words\n self.n_correct += stat.n_correct\n\n if update_n_src_words:\n self.n_src_words += stat.n_src_words\n\n def accuracy(self):\n \"\"\" compute accuracy \"\"\"\n return 100 * (self.n_correct / self.n_words)\n\n def xent(self):\n \"\"\" compute cross entropy \"\"\"\n return self.loss / self.n_words\n\n def ppl(self):\n \"\"\" compute perplexity \"\"\"\n return math.exp(min(self.loss / self.n_words, 100))\n\n def elapsed_time(self):\n \"\"\" compute elapsed time \"\"\"\n return 
time.time() - self.start_time\n\n def output(self, step, num_steps, learning_rate, start):\n \"\"\"Write out statistics to stdout.\n\n Args:\n step (int): current step\n n_batch (int): total batches\n start (int): start time of step.\n \"\"\"\n t = self.elapsed_time()\n logger.info(\n (\"Step %2d/%5d; acc: %6.2f; ppl: %5.2f; xent: %4.2f; \" +\n \"lr: %7.5f; %3.0f/%3.0f tok/s; %6.0f sec\")\n % (step, num_steps,\n self.accuracy(),\n self.ppl(),\n self.xent(),\n learning_rate,\n self.n_src_words / (t + 1e-5),\n self.n_words / (t + 1e-5),\n time.time() - start))\n sys.stdout.flush()\n\n def log_tensorboard(self, prefix, writer, learning_rate, step):\n \"\"\" display statistics to tensorboard \"\"\"\n t = self.elapsed_time()\n writer.add_scalar(prefix + \"/xent\", self.xent(), step)\n writer.add_scalar(prefix + \"/ppl\", self.ppl(), step)\n writer.add_scalar(prefix + \"/accuracy\", self.accuracy(), step)\n writer.add_scalar(prefix + \"/tgtper\", self.n_words / t, step)\n writer.add_scalar(prefix + \"/lr\", learning_rate, step)\n", "path": "onmt/utils/statistics.py"}], "after_files": [{"content": "\"\"\" Statistics calculation utility \"\"\"\nfrom __future__ import division\nimport time\nimport math\nimport sys\n\nfrom onmt.utils.logging import logger\n\n\nclass Statistics(object):\n \"\"\"\n Accumulator for loss statistics.\n Currently calculates:\n\n * accuracy\n * perplexity\n * elapsed time\n \"\"\"\n\n def __init__(self, loss=0, n_words=0, n_correct=0):\n self.loss = loss\n self.n_words = n_words\n self.n_correct = n_correct\n self.n_src_words = 0\n self.start_time = time.time()\n\n @staticmethod\n def all_gather_stats(stat, max_size=4096):\n \"\"\"\n Gather a `Statistics` object accross multiple process/nodes\n\n Args:\n stat(:obj:Statistics): the statistics object to gather\n accross all processes/nodes\n max_size(int): max buffer size to use\n\n Returns:\n `Statistics`, the update stats object\n \"\"\"\n stats = Statistics.all_gather_stats_list([stat], max_size=max_size)\n return stats[0]\n\n @staticmethod\n def all_gather_stats_list(stat_list, max_size=4096):\n \"\"\"\n Gather a `Statistics` list accross all processes/nodes\n\n Args:\n stat_list(list([`Statistics`])): list of statistics objects to\n gather accross all processes/nodes\n max_size(int): max buffer size to use\n\n Returns:\n our_stats(list([`Statistics`])): list of updated stats\n \"\"\"\n from torch.distributed import get_rank\n from onmt.utils.distributed import all_gather_list\n\n # Get a list of world_size lists with len(stat_list) Statistics objects\n all_stats = all_gather_list(stat_list, max_size=max_size)\n\n our_rank = get_rank()\n our_stats = all_stats[our_rank]\n for other_rank, stats in enumerate(all_stats):\n if other_rank == our_rank:\n continue\n for i, stat in enumerate(stats):\n our_stats[i].update(stat, update_n_src_words=True)\n return our_stats\n\n def update(self, stat, update_n_src_words=False):\n \"\"\"\n Update statistics by suming values with another `Statistics` object\n\n Args:\n stat: another statistic object\n update_n_src_words(bool): whether to update (sum) `n_src_words`\n or not\n\n \"\"\"\n self.loss += stat.loss\n self.n_words += stat.n_words\n self.n_correct += stat.n_correct\n\n if update_n_src_words:\n self.n_src_words += stat.n_src_words\n\n def accuracy(self):\n \"\"\" compute accuracy \"\"\"\n return 100 * (self.n_correct / self.n_words)\n\n def xent(self):\n \"\"\" compute cross entropy \"\"\"\n return self.loss / self.n_words\n\n def ppl(self):\n \"\"\" compute perplexity \"\"\"\n 
return math.exp(min(self.loss / self.n_words, 100))\n\n def elapsed_time(self):\n \"\"\" compute elapsed time \"\"\"\n return time.time() - self.start_time\n\n def output(self, step, num_steps, learning_rate, start):\n \"\"\"Write out statistics to stdout.\n\n Args:\n step (int): current step\n n_batch (int): total batches\n start (int): start time of step.\n \"\"\"\n t = self.elapsed_time()\n logger.info(\n (\"Step %2d/%5d; acc: %6.2f; ppl: %5.2f; xent: %4.2f; \" +\n \"lr: %7.5f; %3.0f/%3.0f tok/s; %6.0f sec\")\n % (step, num_steps,\n self.accuracy(),\n self.ppl(),\n self.xent(),\n learning_rate,\n self.n_src_words / (t + 1e-5),\n self.n_words / (t + 1e-5),\n time.time() - start))\n sys.stdout.flush()\n\n def log_tensorboard(self, prefix, writer, learning_rate, step):\n \"\"\" display statistics to tensorboard \"\"\"\n t = self.elapsed_time()\n writer.add_scalar(prefix + \"/xent\", self.xent(), step)\n writer.add_scalar(prefix + \"/ppl\", self.ppl(), step)\n writer.add_scalar(prefix + \"/accuracy\", self.accuracy(), step)\n writer.add_scalar(prefix + \"/tgtper\", self.n_words / t, step)\n writer.add_scalar(prefix + \"/lr\", learning_rate, step)\n", "path": "onmt/utils/statistics.py"}]}
1,592
171
gh_patches_debug_19889
rasdani/github-patches
git_diff
inventree__InvenTree-2427
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] could not serialize access due to concurrent update could not serialize access due to concurrent update and just crash.... --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `InvenTree/InvenTree/exchange.py` Content: ``` 1 from common.settings import currency_code_default, currency_codes 2 from urllib.error import HTTPError, URLError 3 4 from djmoney.contrib.exchange.backends.base import SimpleExchangeBackend 5 6 7 class InvenTreeExchange(SimpleExchangeBackend): 8 """ 9 Backend for automatically updating currency exchange rates. 10 11 Uses the exchangerate.host service API 12 """ 13 14 name = "InvenTreeExchange" 15 16 def __init__(self): 17 self.url = "https://api.exchangerate.host/latest" 18 19 super().__init__() 20 21 def get_params(self): 22 # No API key is required 23 return { 24 } 25 26 def update_rates(self, base_currency=currency_code_default()): 27 28 symbols = ','.join(currency_codes()) 29 30 try: 31 super().update_rates(base=base_currency, symbols=symbols) 32 # catch connection errors 33 except (HTTPError, URLError): 34 print('Encountered connection error while updating') 35 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/InvenTree/InvenTree/exchange.py b/InvenTree/InvenTree/exchange.py --- a/InvenTree/InvenTree/exchange.py +++ b/InvenTree/InvenTree/exchange.py @@ -2,6 +2,7 @@ from urllib.error import HTTPError, URLError from djmoney.contrib.exchange.backends.base import SimpleExchangeBackend +from django.db.utils import OperationalError class InvenTreeExchange(SimpleExchangeBackend): @@ -32,3 +33,12 @@ # catch connection errors except (HTTPError, URLError): print('Encountered connection error while updating') + except OperationalError as e: + if 'SerializationFailure' in e.__cause__.__class__.__name__: + print('Serialization Failure while updating exchange rates') + # We are just going to swallow this exception because the + # exchange rates will be updated later by the scheduled task + else: + # Other operational errors probably are still show stoppers + # so reraise them so that the log contains the stacktrace + raise
{"golden_diff": "diff --git a/InvenTree/InvenTree/exchange.py b/InvenTree/InvenTree/exchange.py\n--- a/InvenTree/InvenTree/exchange.py\n+++ b/InvenTree/InvenTree/exchange.py\n@@ -2,6 +2,7 @@\n from urllib.error import HTTPError, URLError\n \n from djmoney.contrib.exchange.backends.base import SimpleExchangeBackend\n+from django.db.utils import OperationalError\n \n \n class InvenTreeExchange(SimpleExchangeBackend):\n@@ -32,3 +33,12 @@\n # catch connection errors\n except (HTTPError, URLError):\n print('Encountered connection error while updating')\n+ except OperationalError as e:\n+ if 'SerializationFailure' in e.__cause__.__class__.__name__:\n+ print('Serialization Failure while updating exchange rates')\n+ # We are just going to swallow this exception because the\n+ # exchange rates will be updated later by the scheduled task\n+ else:\n+ # Other operational errors probably are still show stoppers\n+ # so reraise them so that the log contains the stacktrace\n+ raise\n", "issue": "[BUG] could not serialize access due to concurrent update\ncould not serialize access due to concurrent update\r\n\r\nand just crash....\n", "before_files": [{"content": "from common.settings import currency_code_default, currency_codes\nfrom urllib.error import HTTPError, URLError\n\nfrom djmoney.contrib.exchange.backends.base import SimpleExchangeBackend\n\n\nclass InvenTreeExchange(SimpleExchangeBackend):\n \"\"\"\n Backend for automatically updating currency exchange rates.\n\n Uses the exchangerate.host service API\n \"\"\"\n\n name = \"InvenTreeExchange\"\n\n def __init__(self):\n self.url = \"https://api.exchangerate.host/latest\"\n\n super().__init__()\n\n def get_params(self):\n # No API key is required\n return {\n }\n\n def update_rates(self, base_currency=currency_code_default()):\n\n symbols = ','.join(currency_codes())\n\n try:\n super().update_rates(base=base_currency, symbols=symbols)\n # catch connection errors\n except (HTTPError, URLError):\n print('Encountered connection error while updating')\n", "path": "InvenTree/InvenTree/exchange.py"}], "after_files": [{"content": "from common.settings import currency_code_default, currency_codes\nfrom urllib.error import HTTPError, URLError\n\nfrom djmoney.contrib.exchange.backends.base import SimpleExchangeBackend\nfrom django.db.utils import OperationalError\n\n\nclass InvenTreeExchange(SimpleExchangeBackend):\n \"\"\"\n Backend for automatically updating currency exchange rates.\n\n Uses the exchangerate.host service API\n \"\"\"\n\n name = \"InvenTreeExchange\"\n\n def __init__(self):\n self.url = \"https://api.exchangerate.host/latest\"\n\n super().__init__()\n\n def get_params(self):\n # No API key is required\n return {\n }\n\n def update_rates(self, base_currency=currency_code_default()):\n\n symbols = ','.join(currency_codes())\n\n try:\n super().update_rates(base=base_currency, symbols=symbols)\n # catch connection errors\n except (HTTPError, URLError):\n print('Encountered connection error while updating')\n except OperationalError as e:\n if 'SerializationFailure' in e.__cause__.__class__.__name__:\n print('Serialization Failure while updating exchange rates')\n # We are just going to swallow this exception because the\n # exchange rates will be updated later by the scheduled task\n else:\n # Other operational errors probably are still show stoppers\n # so reraise them so that the log contains the stacktrace\n raise\n", "path": "InvenTree/InvenTree/exchange.py"}]}
545
249
gh_patches_debug_9711
rasdani/github-patches
git_diff
kivy__python-for-android-735
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Libffi recipe fails with "unrecognized options: --enable-shared" I'm trying to use the packaged `cryptography` recipe, which includes the `Libffi` recipe as a dependency. I'm getting the following errors with `Libffi` while trying to build the apk: ``` configure: WARNING: unrecognized options: --enable-shared configure: error: cannot find install-sh, install.sh, or shtool in "." "./.." "./../.." ``` I'm using recent versions of the toolchain: p4a (checked out from github a week ago), most recent Android SDK & NDK (25.1.3 and r11c, respectively). I'm invoking p4a as follows: ``` p4a apk --sdk_dir ~/AndroidUpstream/android-sdk-linux \ --ndk_dir ~/AndroidUpstream/android-ndk-r11c \ --android_api 16 --ndk_ver r11c \ --private ~/src/TestCrypto \ --package=org.example.testcrypto \ --name=TestCrypto --version=0.5 \ --bootstrap=sdl2 \ --requirements=sdl2,python2,kivy,cryptography \ --dist_name=remotepython \ --permission=INTERNET ``` I've looked around (on web, and here on github issue tracker), but can't find explanations for this error message. Thanks in advance for any suggestions. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pythonforandroid/recipes/libffi/__init__.py` Content: ``` 1 from pythonforandroid.recipe import Recipe 2 from pythonforandroid.logger import shprint 3 from pythonforandroid.util import current_directory 4 from os.path import exists, join 5 import sh 6 import glob 7 8 9 class LibffiRecipe(Recipe): 10 name = 'libffi' 11 version = 'v3.2.1' 12 url = 'https://github.com/atgreen/libffi/archive/{version}.zip' 13 14 patches = ['remove-version-info.patch'] 15 16 def get_host(self, arch): 17 with current_directory(self.get_build_dir(arch.arch)): 18 host = None 19 with open('Makefile') as f: 20 for line in f: 21 if line.startswith('host = '): 22 host = line.strip()[7:] 23 break 24 25 if not host or not exists(host): 26 raise RuntimeError('failed to find build output! ({})' 27 .format(host)) 28 29 return host 30 31 def should_build(self, arch): 32 # return not bool(glob.glob(join(self.ctx.get_libs_dir(arch.arch), 33 # 'libffi.so*'))) 34 return not exists(join(self.ctx.get_libs_dir(arch.arch), 'libffi.so')) 35 # return not exists(join(self.ctx.get_python_install_dir(), 'lib', 36 # 'libffi.so')) 37 38 def build_arch(self, arch): 39 env = self.get_recipe_env(arch) 40 with current_directory(self.get_build_dir(arch.arch)): 41 if not exists('configure'): 42 shprint(sh.Command('./autogen.sh'), _env=env) 43 shprint(sh.Command('./configure'), '--host=' + arch.toolchain_prefix, 44 '--prefix=' + self.ctx.get_python_install_dir(), 45 '--enable-shared', _env=env) 46 shprint(sh.make, '-j5', 'libffi.la', _env=env) 47 48 49 # dlname = None 50 # with open(join(host, 'libffi.la')) as f: 51 # for line in f: 52 # if line.startswith('dlname='): 53 # dlname = line.strip()[8:-1] 54 # break 55 # 56 # if not dlname or not exists(join(host, '.libs', dlname)): 57 # raise RuntimeError('failed to locate shared object! 
({})' 58 # .format(dlname)) 59 60 # shprint(sh.sed, '-i', 's/^dlname=.*$/dlname=\'libffi.so\'/', join(host, 'libffi.la')) 61 62 shprint(sh.cp, '-t', self.ctx.get_libs_dir(arch.arch), 63 join(self.get_host(arch), '.libs', 'libffi.so')) #, 64 # join(host, 'libffi.la')) 65 66 def get_include_dirs(self, arch): 67 return [join(self.get_build_dir(arch.arch), self.get_host(arch), 'include')] 68 69 70 recipe = LibffiRecipe() 71 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pythonforandroid/recipes/libffi/__init__.py b/pythonforandroid/recipes/libffi/__init__.py --- a/pythonforandroid/recipes/libffi/__init__.py +++ b/pythonforandroid/recipes/libffi/__init__.py @@ -40,6 +40,7 @@ with current_directory(self.get_build_dir(arch.arch)): if not exists('configure'): shprint(sh.Command('./autogen.sh'), _env=env) + shprint(sh.Command('autoreconf -vif'), _env=env) shprint(sh.Command('./configure'), '--host=' + arch.toolchain_prefix, '--prefix=' + self.ctx.get_python_install_dir(), '--enable-shared', _env=env)
{"golden_diff": "diff --git a/pythonforandroid/recipes/libffi/__init__.py b/pythonforandroid/recipes/libffi/__init__.py\n--- a/pythonforandroid/recipes/libffi/__init__.py\n+++ b/pythonforandroid/recipes/libffi/__init__.py\n@@ -40,6 +40,7 @@\n \t\twith current_directory(self.get_build_dir(arch.arch)):\n \t\t\tif not exists('configure'):\n \t\t\t\tshprint(sh.Command('./autogen.sh'), _env=env)\n+\t\t\tshprint(sh.Command('autoreconf -vif'), _env=env)\n \t\t\tshprint(sh.Command('./configure'), '--host=' + arch.toolchain_prefix,\n \t\t\t '--prefix=' + self.ctx.get_python_install_dir(),\n \t\t\t '--enable-shared', _env=env)\n", "issue": "Libffi recipe fails with \"unrecognized options: --enable-shared\"\nI'm trying to use the packaged `cryptography` recipe, which includes the `Libffi` recipe as a dependency. I'm getting the following errors with `Libffi` while trying to build the apk:\n\n```\nconfigure: WARNING: unrecognized options: --enable-shared\nconfigure: error: cannot find install-sh, install.sh, or shtool in \".\" \"./..\" \"./../..\"\n```\n\nI'm using recent versions of the toolchain: p4a (checked out from github a week ago), most recent Android SDK & NDK (25.1.3 and r11c, respectively). I'm invoking p4a as follows:\n\n```\np4a apk --sdk_dir ~/AndroidUpstream/android-sdk-linux \\\n --ndk_dir ~/AndroidUpstream/android-ndk-r11c \\\n --android_api 16 --ndk_ver r11c \\\n --private ~/src/TestCrypto \\\n --package=org.example.testcrypto \\\n --name=TestCrypto --version=0.5 \\\n --bootstrap=sdl2 \\\n --requirements=sdl2,python2,kivy,cryptography \\\n --dist_name=remotepython \\\n --permission=INTERNET\n```\n\nI've looked around (on web, and here on github issue tracker), but can't find explanations for this error message. Thanks in advance for any suggestions.\n\n", "before_files": [{"content": "from pythonforandroid.recipe import Recipe\nfrom pythonforandroid.logger import shprint\nfrom pythonforandroid.util import current_directory\nfrom os.path import exists, join\nimport sh\nimport glob\n\n\nclass LibffiRecipe(Recipe):\n\tname = 'libffi'\n\tversion = 'v3.2.1'\n\turl = 'https://github.com/atgreen/libffi/archive/{version}.zip'\n\n\tpatches = ['remove-version-info.patch']\n\n\tdef get_host(self, arch):\n\t\twith current_directory(self.get_build_dir(arch.arch)):\n\t\t\thost = None\n\t\t\twith open('Makefile') as f:\n\t\t\t\tfor line in f:\n\t\t\t\t\tif line.startswith('host = '):\n\t\t\t\t\t\thost = line.strip()[7:]\n\t\t\t\t\t\tbreak\n\n\t\t\tif not host or not exists(host):\n\t\t\t\traise RuntimeError('failed to find build output! 
({})'\n\t\t\t\t .format(host))\n\t\t\t\n\t\t\treturn host\n\n\tdef should_build(self, arch):\n\t\t# return not bool(glob.glob(join(self.ctx.get_libs_dir(arch.arch),\n\t\t# 'libffi.so*')))\n\t\treturn not exists(join(self.ctx.get_libs_dir(arch.arch), 'libffi.so'))\n\t\t# return not exists(join(self.ctx.get_python_install_dir(), 'lib',\n\t\t# 'libffi.so'))\n\n\tdef build_arch(self, arch):\n\t\tenv = self.get_recipe_env(arch)\n\t\twith current_directory(self.get_build_dir(arch.arch)):\n\t\t\tif not exists('configure'):\n\t\t\t\tshprint(sh.Command('./autogen.sh'), _env=env)\n\t\t\tshprint(sh.Command('./configure'), '--host=' + arch.toolchain_prefix,\n\t\t\t '--prefix=' + self.ctx.get_python_install_dir(),\n\t\t\t '--enable-shared', _env=env)\n\t\t\tshprint(sh.make, '-j5', 'libffi.la', _env=env)\n\n\n\t\t\t# dlname = None\n\t\t\t# with open(join(host, 'libffi.la')) as f:\n\t\t\t# \tfor line in f:\n\t\t\t# \t\tif line.startswith('dlname='):\n\t\t\t# \t\t\tdlname = line.strip()[8:-1]\n\t\t\t# \t\t\tbreak\n\t\t\t# \n\t\t\t# if not dlname or not exists(join(host, '.libs', dlname)):\n\t\t\t# \traise RuntimeError('failed to locate shared object! ({})'\n\t\t\t# \t .format(dlname))\n\n\t\t\t# shprint(sh.sed, '-i', 's/^dlname=.*$/dlname=\\'libffi.so\\'/', join(host, 'libffi.la'))\n\n\t\t\tshprint(sh.cp, '-t', self.ctx.get_libs_dir(arch.arch),\n\t\t\t join(self.get_host(arch), '.libs', 'libffi.so')) #,\n\t\t\t # join(host, 'libffi.la'))\n\n\tdef get_include_dirs(self, arch):\n\t\treturn [join(self.get_build_dir(arch.arch), self.get_host(arch), 'include')]\n\n\nrecipe = LibffiRecipe()\n", "path": "pythonforandroid/recipes/libffi/__init__.py"}], "after_files": [{"content": "from pythonforandroid.recipe import Recipe\nfrom pythonforandroid.logger import shprint\nfrom pythonforandroid.util import current_directory\nfrom os.path import exists, join\nimport sh\nimport glob\n\n\nclass LibffiRecipe(Recipe):\n\tname = 'libffi'\n\tversion = 'v3.2.1'\n\turl = 'https://github.com/atgreen/libffi/archive/{version}.zip'\n\n\tpatches = ['remove-version-info.patch']\n\n\tdef get_host(self, arch):\n\t\twith current_directory(self.get_build_dir(arch.arch)):\n\t\t\thost = None\n\t\t\twith open('Makefile') as f:\n\t\t\t\tfor line in f:\n\t\t\t\t\tif line.startswith('host = '):\n\t\t\t\t\t\thost = line.strip()[7:]\n\t\t\t\t\t\tbreak\n\n\t\t\tif not host or not exists(host):\n\t\t\t\traise RuntimeError('failed to find build output! 
({})'\n\t\t\t\t .format(host))\n\t\t\t\n\t\t\treturn host\n\n\tdef should_build(self, arch):\n\t\t# return not bool(glob.glob(join(self.ctx.get_libs_dir(arch.arch),\n\t\t# 'libffi.so*')))\n\t\treturn not exists(join(self.ctx.get_libs_dir(arch.arch), 'libffi.so'))\n\t\t# return not exists(join(self.ctx.get_python_install_dir(), 'lib',\n\t\t# 'libffi.so'))\n\n\tdef build_arch(self, arch):\n\t\tenv = self.get_recipe_env(arch)\n\t\twith current_directory(self.get_build_dir(arch.arch)):\n\t\t\tif not exists('configure'):\n\t\t\t\tshprint(sh.Command('./autogen.sh'), _env=env)\n\t\t\tshprint(sh.Command('autoreconf -vif'), _env=env)\n\t\t\tshprint(sh.Command('./configure'), '--host=' + arch.toolchain_prefix,\n\t\t\t '--prefix=' + self.ctx.get_python_install_dir(),\n\t\t\t '--enable-shared', _env=env)\n\t\t\tshprint(sh.make, '-j5', 'libffi.la', _env=env)\n\n\n\t\t\t# dlname = None\n\t\t\t# with open(join(host, 'libffi.la')) as f:\n\t\t\t# \tfor line in f:\n\t\t\t# \t\tif line.startswith('dlname='):\n\t\t\t# \t\t\tdlname = line.strip()[8:-1]\n\t\t\t# \t\t\tbreak\n\t\t\t# \n\t\t\t# if not dlname or not exists(join(host, '.libs', dlname)):\n\t\t\t# \traise RuntimeError('failed to locate shared object! ({})'\n\t\t\t# \t .format(dlname))\n\n\t\t\t# shprint(sh.sed, '-i', 's/^dlname=.*$/dlname=\\'libffi.so\\'/', join(host, 'libffi.la'))\n\n\t\t\tshprint(sh.cp, '-t', self.ctx.get_libs_dir(arch.arch),\n\t\t\t join(self.get_host(arch), '.libs', 'libffi.so')) #,\n\t\t\t # join(host, 'libffi.la'))\n\n\tdef get_include_dirs(self, arch):\n\t\treturn [join(self.get_build_dir(arch.arch), self.get_host(arch), 'include')]\n\n\nrecipe = LibffiRecipe()\n", "path": "pythonforandroid/recipes/libffi/__init__.py"}]}
1,341
162
gh_patches_debug_12862
rasdani/github-patches
git_diff
pyro-ppl__numpyro-987
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add required packages at the top of tutorials to run on colab --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # Copyright Contributors to the Pyro project. 2 # SPDX-License-Identifier: Apache-2.0 3 4 from __future__ import absolute_import, division, print_function 5 6 import os 7 import sys 8 9 from setuptools import find_packages, setup 10 11 PROJECT_PATH = os.path.dirname(os.path.abspath(__file__)) 12 13 # Find version 14 for line in open(os.path.join(PROJECT_PATH, "numpyro", "version.py")): 15 if line.startswith("__version__ = "): 16 version = line.strip().split()[2][1:-1] 17 18 # READ README.md for long description on PyPi. 19 try: 20 long_description = open("README.md", encoding="utf-8").read() 21 except Exception as e: 22 sys.stderr.write("Failed to read README.md:\n {}\n".format(e)) 23 sys.stderr.flush() 24 long_description = "" 25 26 27 setup( 28 name="numpyro", 29 version=version, 30 description="Pyro PPL on NumPy", 31 packages=find_packages(include=["numpyro", "numpyro.*"]), 32 url="https://github.com/pyro-ppl/numpyro", 33 author="Uber AI Labs", 34 install_requires=[ 35 "jax>=0.2.11", 36 "jaxlib>=0.1.62", 37 "tqdm", 38 ], 39 extras_require={ 40 "doc": [ 41 "ipython", # sphinx needs this to render codes 42 "nbsphinx", 43 "sphinx", 44 "sphinx_rtd_theme", 45 "sphinx-gallery", 46 ], 47 "test": [ 48 "black", 49 "flake8", 50 "isort>=5.0", 51 "pytest>=4.1", 52 "pyro-api>=0.1.1", 53 "scipy>=1.1", 54 ], 55 "dev": [ 56 "dm-haiku", 57 "flax", 58 "funsor @ git+https://github.com/pyro-ppl/funsor.git@d5574988665dd822ec64e41f2b54b9dc929959dc", 59 "graphviz", 60 "tensorflow_probability", 61 ], 62 "examples": ["arviz", "jupyter", "matplotlib", "pandas", "seaborn"], 63 }, 64 long_description=long_description, 65 long_description_content_type="text/markdown", 66 keywords="probabilistic machine learning bayesian statistics", 67 license="Apache License 2.0", 68 classifiers=[ 69 "Intended Audience :: Developers", 70 "Intended Audience :: Education", 71 "Intended Audience :: Science/Research", 72 "License :: OSI Approved :: Apache Software License", 73 "Operating System :: POSIX :: Linux", 74 "Operating System :: MacOS :: MacOS X", 75 "Programming Language :: Python :: 3.6", 76 "Programming Language :: Python :: 3.7", 77 "Programming Language :: Python :: 3.8", 78 "Programming Language :: Python :: 3.9", 79 ], 80 ) 81 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -55,9 +55,12 @@ "dev": [ "dm-haiku", "flax", + # TODO: bump funsor version before the release "funsor @ git+https://github.com/pyro-ppl/funsor.git@d5574988665dd822ec64e41f2b54b9dc929959dc", "graphviz", - "tensorflow_probability", + # TODO: change this to tensorflow_probability>0.12.1 when the next version + # of tfp is released. The current release is not compatible with jax>=0.2.12. + "tfp-nightly", ], "examples": ["arviz", "jupyter", "matplotlib", "pandas", "seaborn"], },
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -55,9 +55,12 @@\n \"dev\": [\n \"dm-haiku\",\n \"flax\",\n+ # TODO: bump funsor version before the release\n \"funsor @ git+https://github.com/pyro-ppl/funsor.git@d5574988665dd822ec64e41f2b54b9dc929959dc\",\n \"graphviz\",\n- \"tensorflow_probability\",\n+ # TODO: change this to tensorflow_probability>0.12.1 when the next version\n+ # of tfp is released. The current release is not compatible with jax>=0.2.12.\n+ \"tfp-nightly\",\n ],\n \"examples\": [\"arviz\", \"jupyter\", \"matplotlib\", \"pandas\", \"seaborn\"],\n },\n", "issue": "Add required packages at the top of tutorials to run on colab\n\n", "before_files": [{"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\n\n# Find version\nfor line in open(os.path.join(PROJECT_PATH, \"numpyro\", \"version.py\")):\n if line.startswith(\"__version__ = \"):\n version = line.strip().split()[2][1:-1]\n\n# READ README.md for long description on PyPi.\ntry:\n long_description = open(\"README.md\", encoding=\"utf-8\").read()\nexcept Exception as e:\n sys.stderr.write(\"Failed to read README.md:\\n {}\\n\".format(e))\n sys.stderr.flush()\n long_description = \"\"\n\n\nsetup(\n name=\"numpyro\",\n version=version,\n description=\"Pyro PPL on NumPy\",\n packages=find_packages(include=[\"numpyro\", \"numpyro.*\"]),\n url=\"https://github.com/pyro-ppl/numpyro\",\n author=\"Uber AI Labs\",\n install_requires=[\n \"jax>=0.2.11\",\n \"jaxlib>=0.1.62\",\n \"tqdm\",\n ],\n extras_require={\n \"doc\": [\n \"ipython\", # sphinx needs this to render codes\n \"nbsphinx\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"sphinx-gallery\",\n ],\n \"test\": [\n \"black\",\n \"flake8\",\n \"isort>=5.0\",\n \"pytest>=4.1\",\n \"pyro-api>=0.1.1\",\n \"scipy>=1.1\",\n ],\n \"dev\": [\n \"dm-haiku\",\n \"flax\",\n \"funsor @ git+https://github.com/pyro-ppl/funsor.git@d5574988665dd822ec64e41f2b54b9dc929959dc\",\n \"graphviz\",\n \"tensorflow_probability\",\n ],\n \"examples\": [\"arviz\", \"jupyter\", \"matplotlib\", \"pandas\", \"seaborn\"],\n },\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n keywords=\"probabilistic machine learning bayesian statistics\",\n license=\"Apache License 2.0\",\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\n\n# Find version\nfor line in open(os.path.join(PROJECT_PATH, \"numpyro\", \"version.py\")):\n if line.startswith(\"__version__ = \"):\n version = line.strip().split()[2][1:-1]\n\n# READ README.md for long description on 
PyPi.\ntry:\n long_description = open(\"README.md\", encoding=\"utf-8\").read()\nexcept Exception as e:\n sys.stderr.write(\"Failed to read README.md:\\n {}\\n\".format(e))\n sys.stderr.flush()\n long_description = \"\"\n\n\nsetup(\n name=\"numpyro\",\n version=version,\n description=\"Pyro PPL on NumPy\",\n packages=find_packages(include=[\"numpyro\", \"numpyro.*\"]),\n url=\"https://github.com/pyro-ppl/numpyro\",\n author=\"Uber AI Labs\",\n install_requires=[\n \"jax>=0.2.11\",\n \"jaxlib>=0.1.62\",\n \"tqdm\",\n ],\n extras_require={\n \"doc\": [\n \"ipython\", # sphinx needs this to render codes\n \"nbsphinx\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"sphinx-gallery\",\n ],\n \"test\": [\n \"black\",\n \"flake8\",\n \"isort>=5.0\",\n \"pytest>=4.1\",\n \"pyro-api>=0.1.1\",\n \"scipy>=1.1\",\n ],\n \"dev\": [\n \"dm-haiku\",\n \"flax\",\n # TODO: bump funsor version before the release\n \"funsor @ git+https://github.com/pyro-ppl/funsor.git@d5574988665dd822ec64e41f2b54b9dc929959dc\",\n \"graphviz\",\n # TODO: change this to tensorflow_probability>0.12.1 when the next version\n # of tfp is released. The current release is not compatible with jax>=0.2.12.\n \"tfp-nightly\",\n ],\n \"examples\": [\"arviz\", \"jupyter\", \"matplotlib\", \"pandas\", \"seaborn\"],\n },\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n keywords=\"probabilistic machine learning bayesian statistics\",\n license=\"Apache License 2.0\",\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n)\n", "path": "setup.py"}]}
1,063
216
gh_patches_debug_18571
rasdani/github-patches
git_diff
urllib3__urllib3-823
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cannot run tests on Ubuntu 14.04 LTS + Python 3 #### Repro Steps ``` # (Install Docker from: http://www.docker.com ) # If OS X, start with: $ docker-machine start default; eval "$(docker-machine env default)" # If OS X or Linux, continue with: $ docker run -it ubuntu:trusty # Ubuntu 14.04 LTS $$ apt-get update $$ apt-get install git -y $$ apt-get install python3-pip -y $$ pip3 install virtualenv $$ git clone https://github.com/shazow/urllib3 $$ cd urllib3/ $$ virtualenv venv $$ source venv/bin/activate $$ make test ``` #### Output ``` python setup.py develop Traceback (most recent call last): File "setup.py", line 23, in <module> long_description=open('README.rst').read() + '\n\n' + open('CHANGES.rst').read(), File "/urllib3/venv/lib/python3.4/encodings/ascii.py", line 26, in decode return codecs.ascii_decode(input, self.errors)[0] UnicodeDecodeError: 'ascii' codec can't decode byte 0xf0 in position 4597: ordinal not in range(128) make: *** [*.egg-info] Error 1 ``` #### Notes My best guess is that the `read()`s in setup.py should have an encoding specified. Probably UTF-8. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 from setuptools import setup 4 5 import os 6 import re 7 8 9 base_path = os.path.dirname(__file__) 10 11 # Get the version (borrowed from SQLAlchemy) 12 fp = open(os.path.join(base_path, 'urllib3', '__init__.py')) 13 VERSION = re.compile(r".*__version__ = '(.*?)'", 14 re.S).match(fp.read()).group(1) 15 fp.close() 16 17 18 version = VERSION 19 20 setup(name='urllib3', 21 version=version, 22 description="HTTP library with thread-safe connection pooling, file post, and more.", 23 long_description=open('README.rst').read() + '\n\n' + open('CHANGES.rst').read(), 24 classifiers=[ 25 'Environment :: Web Environment', 26 'Intended Audience :: Developers', 27 'License :: OSI Approved :: MIT License', 28 'Operating System :: OS Independent', 29 'Programming Language :: Python', 30 'Programming Language :: Python :: 2', 31 'Programming Language :: Python :: 3', 32 'Topic :: Internet :: WWW/HTTP', 33 'Topic :: Software Development :: Libraries', 34 ], 35 keywords='urllib httplib threadsafe filepost http https ssl pooling', 36 author='Andrey Petrov', 37 author_email='andrey.petrov@shazow.net', 38 url='http://urllib3.readthedocs.org/', 39 license='MIT', 40 packages=['urllib3', 41 'urllib3.packages', 'urllib3.packages.ssl_match_hostname', 42 'urllib3.contrib', 'urllib3.util', 43 ], 44 requires=[], 45 tests_require=[ 46 # These are a less-specific subset of dev-requirements.txt, for the 47 # convenience of distro package maintainers. 48 'nose', 49 'mock', 50 'tornado', 51 ], 52 test_suite='test', 53 extras_require={ 54 'secure': [ 55 'pyOpenSSL>=0.13', 56 'ndg-httpsclient', 57 'pyasn1', 58 'certifi', 59 ], 60 'socks': [ 61 'PySocks>=1.5.6,<2.0', 62 ] 63 }, 64 ) 65 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ import os import re - +import codecs base_path = os.path.dirname(__file__) @@ -14,13 +14,14 @@ re.S).match(fp.read()).group(1) fp.close() - +readme = codecs.open('README.rst', encoding='utf-8').read() +changes = codecs.open('README.rst', encoding='utf-8').read() version = VERSION setup(name='urllib3', version=version, description="HTTP library with thread-safe connection pooling, file post, and more.", - long_description=open('README.rst').read() + '\n\n' + open('CHANGES.rst').read(), + long_description=u'\n\n'.join([readme, changes]), classifiers=[ 'Environment :: Web Environment', 'Intended Audience :: Developers',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,7 +4,7 @@\n \n import os\n import re\n-\n+import codecs\n \n base_path = os.path.dirname(__file__)\n \n@@ -14,13 +14,14 @@\n re.S).match(fp.read()).group(1)\n fp.close()\n \n-\n+readme = codecs.open('README.rst', encoding='utf-8').read()\n+changes = codecs.open('README.rst', encoding='utf-8').read()\n version = VERSION\n \n setup(name='urllib3',\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n- long_description=open('README.rst').read() + '\\n\\n' + open('CHANGES.rst').read(),\n+ long_description=u'\\n\\n'.join([readme, changes]),\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n", "issue": "Cannot run tests on Ubuntu 14.04 LTS + Python 3\n#### Repro Steps\n\n```\n# (Install Docker from: http://www.docker.com )\n\n# If OS X, start with:\n$ docker-machine start default; eval \"$(docker-machine env default)\"\n\n# If OS X or Linux, continue with:\n$ docker run -it ubuntu:trusty # Ubuntu 14.04 LTS\n$$ apt-get update\n$$ apt-get install git -y\n$$ apt-get install python3-pip -y\n$$ pip3 install virtualenv\n$$ git clone https://github.com/shazow/urllib3\n$$ cd urllib3/\n$$ virtualenv venv\n$$ source venv/bin/activate\n$$ make test\n```\n#### Output\n\n```\npython setup.py develop\nTraceback (most recent call last):\n File \"setup.py\", line 23, in <module>\n long_description=open('README.rst').read() + '\\n\\n' + open('CHANGES.rst').read(),\n File \"/urllib3/venv/lib/python3.4/encodings/ascii.py\", line 26, in decode\n return codecs.ascii_decode(input, self.errors)[0]\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xf0 in position 4597: ordinal not in range(128)\nmake: *** [*.egg-info] Error 1\n```\n#### Notes\n\nMy best guess is that the `read()`s in setup.py should have an encoding specified. 
Probably UTF-8.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nimport os\nimport re\n\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nfp = open(os.path.join(base_path, 'urllib3', '__init__.py'))\nVERSION = re.compile(r\".*__version__ = '(.*?)'\",\n re.S).match(fp.read()).group(1)\nfp.close()\n\n\nversion = VERSION\n\nsetup(name='urllib3',\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=open('README.rst').read() + '\\n\\n' + open('CHANGES.rst').read(),\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='urllib httplib threadsafe filepost http https ssl pooling',\n author='Andrey Petrov',\n author_email='andrey.petrov@shazow.net',\n url='http://urllib3.readthedocs.org/',\n license='MIT',\n packages=['urllib3',\n 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',\n 'urllib3.contrib', 'urllib3.util',\n ],\n requires=[],\n tests_require=[\n # These are a less-specific subset of dev-requirements.txt, for the\n # convenience of distro package maintainers.\n 'nose',\n 'mock',\n 'tornado',\n ],\n test_suite='test',\n extras_require={\n 'secure': [\n 'pyOpenSSL>=0.13',\n 'ndg-httpsclient',\n 'pyasn1',\n 'certifi',\n ],\n 'socks': [\n 'PySocks>=1.5.6,<2.0',\n ]\n },\n )\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nimport os\nimport re\nimport codecs\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nfp = open(os.path.join(base_path, 'urllib3', '__init__.py'))\nVERSION = re.compile(r\".*__version__ = '(.*?)'\",\n re.S).match(fp.read()).group(1)\nfp.close()\n\nreadme = codecs.open('README.rst', encoding='utf-8').read()\nchanges = codecs.open('README.rst', encoding='utf-8').read()\nversion = VERSION\n\nsetup(name='urllib3',\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=u'\\n\\n'.join([readme, changes]),\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='urllib httplib threadsafe filepost http https ssl pooling',\n author='Andrey Petrov',\n author_email='andrey.petrov@shazow.net',\n url='http://urllib3.readthedocs.org/',\n license='MIT',\n packages=['urllib3',\n 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',\n 'urllib3.contrib', 'urllib3.util',\n ],\n requires=[],\n tests_require=[\n # These are a less-specific subset of dev-requirements.txt, for the\n # convenience of distro package maintainers.\n 'nose',\n 'mock',\n 'tornado',\n ],\n test_suite='test',\n extras_require={\n 'secure': [\n 'pyOpenSSL>=0.13',\n 'ndg-httpsclient',\n 'pyasn1',\n 'certifi',\n ],\n 'socks': [\n 'PySocks>=1.5.6,<2.0',\n ]\n },\n )\n", "path": "setup.py"}]}
1,168
214
gh_patches_debug_9009
rasdani/github-patches
git_diff
CTFd__CTFd-2091
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Page preview doesn't consider format Page preview needs to take into account format when previewing --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `CTFd/admin/pages.py` Content: ``` 1 from flask import render_template, request 2 3 from CTFd.admin import admin 4 from CTFd.models import Pages 5 from CTFd.schemas.pages import PageSchema 6 from CTFd.utils import markdown 7 from CTFd.utils.decorators import admins_only 8 9 10 @admin.route("/admin/pages") 11 @admins_only 12 def pages_listing(): 13 pages = Pages.query.all() 14 return render_template("admin/pages.html", pages=pages) 15 16 17 @admin.route("/admin/pages/new") 18 @admins_only 19 def pages_new(): 20 return render_template("admin/editor.html") 21 22 23 @admin.route("/admin/pages/preview", methods=["POST"]) 24 @admins_only 25 def pages_preview(): 26 # We only care about content. 27 # Loading other attributes improperly will cause Marshmallow to incorrectly return a dict 28 data = {"content": request.form.get("content")} 29 schema = PageSchema() 30 page = schema.load(data) 31 return render_template("page.html", content=page.data.html) 32 33 34 @admin.route("/admin/pages/<int:page_id>") 35 @admins_only 36 def pages_detail(page_id): 37 page = Pages.query.filter_by(id=page_id).first_or_404() 38 page_op = request.args.get("operation") 39 40 if request.method == "GET" and page_op == "preview": 41 return render_template("page.html", content=markdown(page.content)) 42 43 if request.method == "GET" and page_op == "create": 44 return render_template("admin/editor.html") 45 46 return render_template("admin/editor.html", page=page) 47 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/CTFd/admin/pages.py b/CTFd/admin/pages.py --- a/CTFd/admin/pages.py +++ b/CTFd/admin/pages.py @@ -25,7 +25,10 @@ def pages_preview(): # We only care about content. # Loading other attributes improperly will cause Marshmallow to incorrectly return a dict - data = {"content": request.form.get("content")} + data = { + "content": request.form.get("content"), + "format": request.form.get("format"), + } schema = PageSchema() page = schema.load(data) return render_template("page.html", content=page.data.html)
{"golden_diff": "diff --git a/CTFd/admin/pages.py b/CTFd/admin/pages.py\n--- a/CTFd/admin/pages.py\n+++ b/CTFd/admin/pages.py\n@@ -25,7 +25,10 @@\n def pages_preview():\n # We only care about content.\n # Loading other attributes improperly will cause Marshmallow to incorrectly return a dict\n- data = {\"content\": request.form.get(\"content\")}\n+ data = {\n+ \"content\": request.form.get(\"content\"),\n+ \"format\": request.form.get(\"format\"),\n+ }\n schema = PageSchema()\n page = schema.load(data)\n return render_template(\"page.html\", content=page.data.html)\n", "issue": "Page preview doesn't consider format\nPage preview needs to take into account format when previewing\n", "before_files": [{"content": "from flask import render_template, request\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Pages\nfrom CTFd.schemas.pages import PageSchema\nfrom CTFd.utils import markdown\nfrom CTFd.utils.decorators import admins_only\n\n\n@admin.route(\"/admin/pages\")\n@admins_only\ndef pages_listing():\n pages = Pages.query.all()\n return render_template(\"admin/pages.html\", pages=pages)\n\n\n@admin.route(\"/admin/pages/new\")\n@admins_only\ndef pages_new():\n return render_template(\"admin/editor.html\")\n\n\n@admin.route(\"/admin/pages/preview\", methods=[\"POST\"])\n@admins_only\ndef pages_preview():\n # We only care about content.\n # Loading other attributes improperly will cause Marshmallow to incorrectly return a dict\n data = {\"content\": request.form.get(\"content\")}\n schema = PageSchema()\n page = schema.load(data)\n return render_template(\"page.html\", content=page.data.html)\n\n\n@admin.route(\"/admin/pages/<int:page_id>\")\n@admins_only\ndef pages_detail(page_id):\n page = Pages.query.filter_by(id=page_id).first_or_404()\n page_op = request.args.get(\"operation\")\n\n if request.method == \"GET\" and page_op == \"preview\":\n return render_template(\"page.html\", content=markdown(page.content))\n\n if request.method == \"GET\" and page_op == \"create\":\n return render_template(\"admin/editor.html\")\n\n return render_template(\"admin/editor.html\", page=page)\n", "path": "CTFd/admin/pages.py"}], "after_files": [{"content": "from flask import render_template, request\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Pages\nfrom CTFd.schemas.pages import PageSchema\nfrom CTFd.utils import markdown\nfrom CTFd.utils.decorators import admins_only\n\n\n@admin.route(\"/admin/pages\")\n@admins_only\ndef pages_listing():\n pages = Pages.query.all()\n return render_template(\"admin/pages.html\", pages=pages)\n\n\n@admin.route(\"/admin/pages/new\")\n@admins_only\ndef pages_new():\n return render_template(\"admin/editor.html\")\n\n\n@admin.route(\"/admin/pages/preview\", methods=[\"POST\"])\n@admins_only\ndef pages_preview():\n # We only care about content.\n # Loading other attributes improperly will cause Marshmallow to incorrectly return a dict\n data = {\n \"content\": request.form.get(\"content\"),\n \"format\": request.form.get(\"format\"),\n }\n schema = PageSchema()\n page = schema.load(data)\n return render_template(\"page.html\", content=page.data.html)\n\n\n@admin.route(\"/admin/pages/<int:page_id>\")\n@admins_only\ndef pages_detail(page_id):\n page = Pages.query.filter_by(id=page_id).first_or_404()\n page_op = request.args.get(\"operation\")\n\n if request.method == \"GET\" and page_op == \"preview\":\n return render_template(\"page.html\", content=markdown(page.content))\n\n if request.method == \"GET\" and page_op == \"create\":\n return 
render_template(\"admin/editor.html\")\n\n return render_template(\"admin/editor.html\", page=page)\n", "path": "CTFd/admin/pages.py"}]}
689
147
gh_patches_debug_23957
rasdani/github-patches
git_diff
crytic__slither-601
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- AssertionError parsing function-type return values The following contract causes an assertion error: ``` contract FunctionReturningFunction { function f() public { } function g() internal returns (function() internal) { return f; } } ``` Output: ``` ERROR:root:Error in .\function_returning_function.sol ERROR:root:Traceback (most recent call last): File "c:\users\x\documents\github\slither\slither\__main__.py", line 610, in main_impl (slither_instances, results_detectors, results_printers, number_contracts) = process_all(filename, args, File "c:\users\x\documents\github\slither\slither\__main__.py", line 67, in process_all (slither, current_results_detectors, current_results_printers, current_analyzed_count) = process_single( File "c:\users\x\documents\github\slither\slither\__main__.py", line 53, in process_single slither = Slither(target, File "c:\users\x\documents\github\slither\slither\slither.py", line 86, in __init__ self._parser.analyze_contracts() File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 345, in analyze_contracts self._convert_to_slithir() File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 489, in _convert_to_slithir func.generate_slithir_and_analyze() File "c:\users\x\documents\github\slither\slither\core\declarations\function.py", line 1652, in generate_slithir_and_analyze node.slithir_generation() File "c:\users\x\documents\github\slither\slither\core\cfg\node.py", line 702, in slithir_generation self._irs = convert_expression(expression, self) File "c:\users\x\documents\github\slither\slither\slithir\convert.py", line 64, in convert_expression visitor = ExpressionToSlithIR(expression, node) File "c:\users\x\documents\github\slither\slither\visitors\slithir\expression_to_slithir.py", line 105, in __init__ r = Return(get(self.expression)) File "c:\users\x\documents\github\slither\slither\slithir\operations\return_operation.py", line 18, in __init__ assert is_valid_rvalue(values) or isinstance(values, TupleVariable) or values is None AssertionError ``` AssertionError parsing function-type return values The following contract causes an assertion error: ``` contract FunctionReturningFunction { function f() public { } function g() internal returns (function() internal) { return f; } } ``` Output: ``` ERROR:root:Error in .\function_returning_function.sol ERROR:root:Traceback (most recent call last): File "c:\users\x\documents\github\slither\slither\__main__.py", line 610, in main_impl (slither_instances, results_detectors, results_printers, number_contracts) = process_all(filename, args, File "c:\users\x\documents\github\slither\slither\__main__.py", line 67, in process_all (slither, current_results_detectors, current_results_printers, current_analyzed_count) = process_single( File "c:\users\x\documents\github\slither\slither\__main__.py", line 53, in process_single slither = Slither(target, File "c:\users\x\documents\github\slither\slither\slither.py", line 86, in __init__ self._parser.analyze_contracts() File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 345, in analyze_contracts self._convert_to_slithir() File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 489, in _convert_to_slithir func.generate_slithir_and_analyze() File "c:\users\x\documents\github\slither\slither\core\declarations\function.py", line 1652, in 
generate_slithir_and_analyze node.slithir_generation() File "c:\users\x\documents\github\slither\slither\core\cfg\node.py", line 702, in slithir_generation self._irs = convert_expression(expression, self) File "c:\users\x\documents\github\slither\slither\slithir\convert.py", line 64, in convert_expression visitor = ExpressionToSlithIR(expression, node) File "c:\users\x\documents\github\slither\slither\visitors\slithir\expression_to_slithir.py", line 105, in __init__ r = Return(get(self.expression)) File "c:\users\x\documents\github\slither\slither\slithir\operations\return_operation.py", line 18, in __init__ assert is_valid_rvalue(values) or isinstance(values, TupleVariable) or values is None AssertionError ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `slither/slithir/operations/return_operation.py` Content: ``` 1 from slither.slithir.operations.operation import Operation 2 3 from slither.slithir.variables.tuple import TupleVariable 4 from slither.slithir.utils.utils import is_valid_rvalue 5 6 7 class Return(Operation): 8 """ 9 Return 10 Only present as last operation in RETURN node 11 """ 12 13 def __init__(self, values): 14 # Note: Can return None 15 # ex: return call() 16 # where call() dont return 17 if not isinstance(values, list): 18 assert is_valid_rvalue(values) or isinstance(values, TupleVariable) or values is None 19 if values is None: 20 values = [] 21 else: 22 values = [values] 23 else: 24 # Remove None 25 # Prior Solidity 0.5 26 # return (0,) 27 # was valid for returns(uint) 28 values = [v for v in values if not v is None] 29 self._valid_value(values) 30 super(Return, self).__init__() 31 self._values = values 32 33 def _valid_value(self, value): 34 if isinstance(value, list): 35 assert all(self._valid_value(v) for v in value) 36 else: 37 assert is_valid_rvalue(value) or isinstance(value, TupleVariable) 38 return True 39 40 @property 41 def read(self): 42 return self._unroll(self.values) 43 44 @property 45 def values(self): 46 return self._unroll(self._values) 47 48 def __str__(self): 49 return "RETURN {}".format(','.join(['{}'.format(x) for x in self.values])) 50 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/slither/slithir/operations/return_operation.py b/slither/slithir/operations/return_operation.py --- a/slither/slithir/operations/return_operation.py +++ b/slither/slithir/operations/return_operation.py @@ -1,3 +1,4 @@ +from slither.core.declarations import Function from slither.slithir.operations.operation import Operation from slither.slithir.variables.tuple import TupleVariable @@ -15,7 +16,7 @@ # ex: return call() # where call() dont return if not isinstance(values, list): - assert is_valid_rvalue(values) or isinstance(values, TupleVariable) or values is None + assert is_valid_rvalue(values) or isinstance(values, (TupleVariable, Function)) or values is None if values is None: values = [] else: @@ -34,7 +35,7 @@ if isinstance(value, list): assert all(self._valid_value(v) for v in value) else: - assert is_valid_rvalue(value) or isinstance(value, TupleVariable) + assert is_valid_rvalue(value) or isinstance(value, (TupleVariable, Function)) return True @property
{"golden_diff": "diff --git a/slither/slithir/operations/return_operation.py b/slither/slithir/operations/return_operation.py\n--- a/slither/slithir/operations/return_operation.py\n+++ b/slither/slithir/operations/return_operation.py\n@@ -1,3 +1,4 @@\n+from slither.core.declarations import Function\n from slither.slithir.operations.operation import Operation\n \n from slither.slithir.variables.tuple import TupleVariable\n@@ -15,7 +16,7 @@\n # ex: return call()\n # where call() dont return\n if not isinstance(values, list):\n- assert is_valid_rvalue(values) or isinstance(values, TupleVariable) or values is None\n+ assert is_valid_rvalue(values) or isinstance(values, (TupleVariable, Function)) or values is None\n if values is None:\n values = []\n else:\n@@ -34,7 +35,7 @@\n if isinstance(value, list):\n assert all(self._valid_value(v) for v in value)\n else:\n- assert is_valid_rvalue(value) or isinstance(value, TupleVariable)\n+ assert is_valid_rvalue(value) or isinstance(value, (TupleVariable, Function))\n return True\n \n @property\n", "issue": "AssertionError parsing function-type return values\nThe following contract causes an assertion error:\r\n```\r\ncontract FunctionReturningFunction {\r\n function f() public { }\r\n\r\n function g() internal returns (function() internal) {\r\n return f;\r\n }\r\n}\r\n```\r\n\r\nOutput:\r\n```\r\nERROR:root:Error in .\\function_returning_function.sol\r\nERROR:root:Traceback (most recent call last):\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\__main__.py\", line 610, in main_impl\r\n (slither_instances, results_detectors, results_printers, number_contracts) = process_all(filename, args,\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\__main__.py\", line 67, in process_all\r\n (slither, current_results_detectors, current_results_printers, current_analyzed_count) = process_single(\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\__main__.py\", line 53, in process_single\r\n slither = Slither(target,\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\slither.py\", line 86, in __init__\r\n self._parser.analyze_contracts()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\solc_parsing\\slitherSolc.py\", line 345, in analyze_contracts\r\n self._convert_to_slithir()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\solc_parsing\\slitherSolc.py\", line 489, in _convert_to_slithir\r\n func.generate_slithir_and_analyze()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\core\\declarations\\function.py\", line 1652, in generate_slithir_and_analyze\r\n node.slithir_generation()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\core\\cfg\\node.py\", line 702, in slithir_generation\r\n self._irs = convert_expression(expression, self)\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\slithir\\convert.py\", line 64, in convert_expression\r\n visitor = ExpressionToSlithIR(expression, node)\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\visitors\\slithir\\expression_to_slithir.py\", line 105, in __init__\r\n r = Return(get(self.expression))\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\slithir\\operations\\return_operation.py\", line 18, in __init__\r\n assert is_valid_rvalue(values) or isinstance(values, TupleVariable) or values is None\r\nAssertionError\r\n```\nAssertionError parsing function-type return values\nThe following contract causes an assertion error:\r\n```\r\ncontract FunctionReturningFunction {\r\n function 
f() public { }\r\n\r\n function g() internal returns (function() internal) {\r\n return f;\r\n }\r\n}\r\n```\r\n\r\nOutput:\r\n```\r\nERROR:root:Error in .\\function_returning_function.sol\r\nERROR:root:Traceback (most recent call last):\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\__main__.py\", line 610, in main_impl\r\n (slither_instances, results_detectors, results_printers, number_contracts) = process_all(filename, args,\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\__main__.py\", line 67, in process_all\r\n (slither, current_results_detectors, current_results_printers, current_analyzed_count) = process_single(\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\__main__.py\", line 53, in process_single\r\n slither = Slither(target,\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\slither.py\", line 86, in __init__\r\n self._parser.analyze_contracts()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\solc_parsing\\slitherSolc.py\", line 345, in analyze_contracts\r\n self._convert_to_slithir()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\solc_parsing\\slitherSolc.py\", line 489, in _convert_to_slithir\r\n func.generate_slithir_and_analyze()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\core\\declarations\\function.py\", line 1652, in generate_slithir_and_analyze\r\n node.slithir_generation()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\core\\cfg\\node.py\", line 702, in slithir_generation\r\n self._irs = convert_expression(expression, self)\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\slithir\\convert.py\", line 64, in convert_expression\r\n visitor = ExpressionToSlithIR(expression, node)\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\visitors\\slithir\\expression_to_slithir.py\", line 105, in __init__\r\n r = Return(get(self.expression))\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\slithir\\operations\\return_operation.py\", line 18, in __init__\r\n assert is_valid_rvalue(values) or isinstance(values, TupleVariable) or values is None\r\nAssertionError\r\n```\n", "before_files": [{"content": "from slither.slithir.operations.operation import Operation\n\nfrom slither.slithir.variables.tuple import TupleVariable\nfrom slither.slithir.utils.utils import is_valid_rvalue\n\n\nclass Return(Operation):\n \"\"\"\n Return\n Only present as last operation in RETURN node\n \"\"\"\n\n def __init__(self, values):\n # Note: Can return None \n # ex: return call()\n # where call() dont return\n if not isinstance(values, list):\n assert is_valid_rvalue(values) or isinstance(values, TupleVariable) or values is None\n if values is None:\n values = []\n else:\n values = [values]\n else:\n # Remove None\n # Prior Solidity 0.5\n # return (0,)\n # was valid for returns(uint)\n values = [v for v in values if not v is None]\n self._valid_value(values)\n super(Return, self).__init__()\n self._values = values\n\n def _valid_value(self, value):\n if isinstance(value, list):\n assert all(self._valid_value(v) for v in value)\n else:\n assert is_valid_rvalue(value) or isinstance(value, TupleVariable)\n return True\n\n @property\n def read(self):\n return self._unroll(self.values)\n\n @property\n def values(self):\n return self._unroll(self._values)\n\n def __str__(self):\n return \"RETURN {}\".format(','.join(['{}'.format(x) for x in self.values]))\n", "path": "slither/slithir/operations/return_operation.py"}], "after_files": [{"content": "from 
slither.core.declarations import Function\nfrom slither.slithir.operations.operation import Operation\n\nfrom slither.slithir.variables.tuple import TupleVariable\nfrom slither.slithir.utils.utils import is_valid_rvalue\n\n\nclass Return(Operation):\n \"\"\"\n Return\n Only present as last operation in RETURN node\n \"\"\"\n\n def __init__(self, values):\n # Note: Can return None \n # ex: return call()\n # where call() dont return\n if not isinstance(values, list):\n assert is_valid_rvalue(values) or isinstance(values, (TupleVariable, Function)) or values is None\n if values is None:\n values = []\n else:\n values = [values]\n else:\n # Remove None\n # Prior Solidity 0.5\n # return (0,)\n # was valid for returns(uint)\n values = [v for v in values if not v is None]\n self._valid_value(values)\n super(Return, self).__init__()\n self._values = values\n\n def _valid_value(self, value):\n if isinstance(value, list):\n assert all(self._valid_value(v) for v in value)\n else:\n assert is_valid_rvalue(value) or isinstance(value, (TupleVariable, Function))\n return True\n\n @property\n def read(self):\n return self._unroll(self.values)\n\n @property\n def values(self):\n return self._unroll(self._values)\n\n def __str__(self):\n return \"RETURN {}\".format(','.join(['{}'.format(x) for x in self.values]))\n", "path": "slither/slithir/operations/return_operation.py"}]}
1,947
272
gh_patches_debug_25284
rasdani/github-patches
git_diff
pydantic__pydantic-740
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- __post_init__ is not triggered for descendant dataclass The `__post_init__` method of parent is not called in child classes. * OS: **Ubuntu** * Python version `import sys; print(sys.version)`: **3.7.2 (default, Mar 25 2019, 19:29:53) ** * Pydantic version `import pydantic; print(pydantic.VERSION)`: **0.32.1** ```py from dataclasses import dataclass, field from typing import Any # comment the pydantinc import below to see the expected result from pydantic.dataclasses import dataclass @dataclass class Base: x: float y: float def __post_init__(self): print('Called!') self.z = self.x + self.y @dataclass class Child(Base): a: int obj = Child(a=0, x=1.5, y=2.5) print(obj.z) ``` Everything works fine when using the `dataclass` from standard library. After looking through the code I expected that an easy workaround would be to add simple `__post_init__` in child: ```python @dataclass class Child(Base): a: int def __post_init__(self): super().__post_init__() ``` Now I do get 'Called!' message, but the 'z' attribute is not preserved anyway... Do I miss something obvious in the usage of pydantic? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pydantic/dataclasses.py` Content: ``` 1 import dataclasses 2 from typing import TYPE_CHECKING, Any, Callable, Dict, Generator, Optional, Type, Union 3 4 from .class_validators import gather_validators 5 from .error_wrappers import ValidationError 6 from .errors import DataclassTypeError 7 from .fields import Required 8 from .main import create_model, validate_model 9 from .utils import AnyType 10 11 if TYPE_CHECKING: # pragma: no cover 12 from .main import BaseConfig, BaseModel # noqa: F401 13 14 class DataclassType: 15 __pydantic_model__: Type[BaseModel] 16 __initialised__: bool 17 18 def __init__(self, *args: Any, **kwargs: Any) -> None: 19 pass 20 21 @classmethod 22 def __validate__(cls, v: Any) -> 'DataclassType': 23 pass 24 25 26 def _validate_dataclass(cls: Type['DataclassType'], v: Any) -> 'DataclassType': 27 if isinstance(v, cls): 28 return v 29 elif isinstance(v, (list, tuple)): 30 return cls(*v) 31 elif isinstance(v, dict): 32 return cls(**v) 33 else: 34 raise DataclassTypeError(class_name=cls.__name__) 35 36 37 def _get_validators(cls: Type['DataclassType']) -> Generator[Any, None, None]: 38 yield cls.__validate__ 39 40 41 def setattr_validate_assignment(self: 'DataclassType', name: str, value: Any) -> None: 42 if self.__initialised__: 43 d = dict(self.__dict__) 44 d.pop(name, None) 45 known_field = self.__pydantic_model__.__fields__.get(name, None) 46 if known_field: 47 value, error_ = known_field.validate(value, d, loc=name, cls=self.__class__) 48 if error_: 49 raise ValidationError([error_], type(self)) 50 51 object.__setattr__(self, name, value) 52 53 54 def _process_class( 55 _cls: AnyType, 56 init: bool, 57 repr: bool, 58 eq: bool, 59 order: bool, 60 unsafe_hash: bool, 61 frozen: bool, 62 config: Type['BaseConfig'], 63 ) -> 'DataclassType': 64 post_init_original = getattr(_cls, '__post_init__', None) 65 post_init_post_parse = getattr(_cls, '__post_init_post_parse__', None) 66 if post_init_original and post_init_original.__name__ == '_pydantic_post_init': 67 post_init_original = None 68 69 def _pydantic_post_init(self: 'DataclassType', *initvars: Any) -> None: 70 if post_init_original is not None: 71 post_init_original(self, *initvars) 72 
d = validate_model(self.__pydantic_model__, self.__dict__, cls=self.__class__)[0] 73 object.__setattr__(self, '__dict__', d) 74 object.__setattr__(self, '__initialised__', True) 75 if post_init_post_parse is not None: 76 post_init_post_parse(self) 77 78 _cls.__post_init__ = _pydantic_post_init 79 cls = dataclasses._process_class(_cls, init, repr, eq, order, unsafe_hash, frozen) # type: ignore 80 81 fields: Dict[str, Any] = { 82 field.name: (field.type, field.default if field.default != dataclasses.MISSING else Required) 83 for field in dataclasses.fields(cls) 84 } 85 86 validators = gather_validators(cls) 87 cls.__pydantic_model__ = create_model( 88 cls.__name__, __config__=config, __module__=_cls.__module__, __validators__=validators, **fields 89 ) 90 91 cls.__initialised__ = False 92 cls.__validate__ = classmethod(_validate_dataclass) 93 cls.__get_validators__ = classmethod(_get_validators) 94 95 if cls.__pydantic_model__.__config__.validate_assignment and not frozen: 96 cls.__setattr__ = setattr_validate_assignment 97 98 return cls 99 100 101 if TYPE_CHECKING: # pragma: no cover 102 # see https://github.com/python/mypy/issues/6239 for explanation of why we do this 103 from dataclasses import dataclass 104 else: 105 106 def dataclass( 107 _cls: Optional[AnyType] = None, 108 *, 109 init: bool = True, 110 repr: bool = True, 111 eq: bool = True, 112 order: bool = False, 113 unsafe_hash: bool = False, 114 frozen: bool = False, 115 config: Type['BaseConfig'] = None, 116 ) -> Union[Callable[[AnyType], 'DataclassType'], 'DataclassType']: 117 """ 118 Like the python standard lib dataclasses but with type validation. 119 120 Arguments are the same as for standard dataclasses, except for validate_assignment which has the same meaning 121 as Config.validate_assignment. 122 """ 123 124 def wrap(cls: AnyType) -> 'DataclassType': 125 return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, config) 126 127 if _cls is None: 128 return wrap 129 130 return wrap(_cls) 131 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pydantic/dataclasses.py b/pydantic/dataclasses.py --- a/pydantic/dataclasses.py +++ b/pydantic/dataclasses.py @@ -62,9 +62,12 @@ config: Type['BaseConfig'], ) -> 'DataclassType': post_init_original = getattr(_cls, '__post_init__', None) - post_init_post_parse = getattr(_cls, '__post_init_post_parse__', None) if post_init_original and post_init_original.__name__ == '_pydantic_post_init': post_init_original = None + if not post_init_original: + post_init_original = getattr(_cls, '__post_init_original__', None) + + post_init_post_parse = getattr(_cls, '__post_init_post_parse__', None) def _pydantic_post_init(self: 'DataclassType', *initvars: Any) -> None: if post_init_original is not None: @@ -91,6 +94,8 @@ cls.__initialised__ = False cls.__validate__ = classmethod(_validate_dataclass) cls.__get_validators__ = classmethod(_get_validators) + if post_init_original: + cls.__post_init_original__ = post_init_original if cls.__pydantic_model__.__config__.validate_assignment and not frozen: cls.__setattr__ = setattr_validate_assignment
{"golden_diff": "diff --git a/pydantic/dataclasses.py b/pydantic/dataclasses.py\n--- a/pydantic/dataclasses.py\n+++ b/pydantic/dataclasses.py\n@@ -62,9 +62,12 @@\n config: Type['BaseConfig'],\n ) -> 'DataclassType':\n post_init_original = getattr(_cls, '__post_init__', None)\n- post_init_post_parse = getattr(_cls, '__post_init_post_parse__', None)\n if post_init_original and post_init_original.__name__ == '_pydantic_post_init':\n post_init_original = None\n+ if not post_init_original:\n+ post_init_original = getattr(_cls, '__post_init_original__', None)\n+\n+ post_init_post_parse = getattr(_cls, '__post_init_post_parse__', None)\n \n def _pydantic_post_init(self: 'DataclassType', *initvars: Any) -> None:\n if post_init_original is not None:\n@@ -91,6 +94,8 @@\n cls.__initialised__ = False\n cls.__validate__ = classmethod(_validate_dataclass)\n cls.__get_validators__ = classmethod(_get_validators)\n+ if post_init_original:\n+ cls.__post_init_original__ = post_init_original\n \n if cls.__pydantic_model__.__config__.validate_assignment and not frozen:\n cls.__setattr__ = setattr_validate_assignment\n", "issue": "__post_init__ is not triggered for descendant dataclass \nThe `__post_init__` method of parent is not called in child classes.\r\n\r\n* OS: **Ubuntu**\r\n* Python version `import sys; print(sys.version)`: **3.7.2 (default, Mar 25 2019, 19:29:53) **\r\n* Pydantic version `import pydantic; print(pydantic.VERSION)`: **0.32.1**\r\n\r\n\r\n```py\r\nfrom dataclasses import dataclass, field\r\nfrom typing import Any\r\n\r\n# comment the pydantinc import below to see the expected result\r\nfrom pydantic.dataclasses import dataclass\r\n\r\n@dataclass\r\nclass Base:\r\n x: float\r\n y: float\r\n\r\n def __post_init__(self):\r\n print('Called!')\r\n self.z = self.x + self.y\r\n\r\n@dataclass\r\nclass Child(Base):\r\n a: int\r\n\r\nobj = Child(a=0, x=1.5, y=2.5)\r\nprint(obj.z)\r\n```\r\n\r\nEverything works fine when using the `dataclass` from standard library. After looking through the code I expected that an easy workaround would be to add simple `__post_init__` in child:\r\n\r\n```python\r\n@dataclass\r\nclass Child(Base):\r\n a: int\r\n\r\n def __post_init__(self):\r\n super().__post_init__()\r\n```\r\n\r\nNow I do get 'Called!' 
message, but the 'z' attribute is not preserved anyway...\r\n\r\nDo I miss something obvious in the usage of pydantic?\n", "before_files": [{"content": "import dataclasses\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Generator, Optional, Type, Union\n\nfrom .class_validators import gather_validators\nfrom .error_wrappers import ValidationError\nfrom .errors import DataclassTypeError\nfrom .fields import Required\nfrom .main import create_model, validate_model\nfrom .utils import AnyType\n\nif TYPE_CHECKING: # pragma: no cover\n from .main import BaseConfig, BaseModel # noqa: F401\n\n class DataclassType:\n __pydantic_model__: Type[BaseModel]\n __initialised__: bool\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n pass\n\n @classmethod\n def __validate__(cls, v: Any) -> 'DataclassType':\n pass\n\n\ndef _validate_dataclass(cls: Type['DataclassType'], v: Any) -> 'DataclassType':\n if isinstance(v, cls):\n return v\n elif isinstance(v, (list, tuple)):\n return cls(*v)\n elif isinstance(v, dict):\n return cls(**v)\n else:\n raise DataclassTypeError(class_name=cls.__name__)\n\n\ndef _get_validators(cls: Type['DataclassType']) -> Generator[Any, None, None]:\n yield cls.__validate__\n\n\ndef setattr_validate_assignment(self: 'DataclassType', name: str, value: Any) -> None:\n if self.__initialised__:\n d = dict(self.__dict__)\n d.pop(name, None)\n known_field = self.__pydantic_model__.__fields__.get(name, None)\n if known_field:\n value, error_ = known_field.validate(value, d, loc=name, cls=self.__class__)\n if error_:\n raise ValidationError([error_], type(self))\n\n object.__setattr__(self, name, value)\n\n\ndef _process_class(\n _cls: AnyType,\n init: bool,\n repr: bool,\n eq: bool,\n order: bool,\n unsafe_hash: bool,\n frozen: bool,\n config: Type['BaseConfig'],\n) -> 'DataclassType':\n post_init_original = getattr(_cls, '__post_init__', None)\n post_init_post_parse = getattr(_cls, '__post_init_post_parse__', None)\n if post_init_original and post_init_original.__name__ == '_pydantic_post_init':\n post_init_original = None\n\n def _pydantic_post_init(self: 'DataclassType', *initvars: Any) -> None:\n if post_init_original is not None:\n post_init_original(self, *initvars)\n d = validate_model(self.__pydantic_model__, self.__dict__, cls=self.__class__)[0]\n object.__setattr__(self, '__dict__', d)\n object.__setattr__(self, '__initialised__', True)\n if post_init_post_parse is not None:\n post_init_post_parse(self)\n\n _cls.__post_init__ = _pydantic_post_init\n cls = dataclasses._process_class(_cls, init, repr, eq, order, unsafe_hash, frozen) # type: ignore\n\n fields: Dict[str, Any] = {\n field.name: (field.type, field.default if field.default != dataclasses.MISSING else Required)\n for field in dataclasses.fields(cls)\n }\n\n validators = gather_validators(cls)\n cls.__pydantic_model__ = create_model(\n cls.__name__, __config__=config, __module__=_cls.__module__, __validators__=validators, **fields\n )\n\n cls.__initialised__ = False\n cls.__validate__ = classmethod(_validate_dataclass)\n cls.__get_validators__ = classmethod(_get_validators)\n\n if cls.__pydantic_model__.__config__.validate_assignment and not frozen:\n cls.__setattr__ = setattr_validate_assignment\n\n return cls\n\n\nif TYPE_CHECKING: # pragma: no cover\n # see https://github.com/python/mypy/issues/6239 for explanation of why we do this\n from dataclasses import dataclass\nelse:\n\n def dataclass(\n _cls: Optional[AnyType] = None,\n *,\n init: bool = True,\n repr: bool = True,\n eq: bool = True,\n order: 
bool = False,\n unsafe_hash: bool = False,\n frozen: bool = False,\n config: Type['BaseConfig'] = None,\n ) -> Union[Callable[[AnyType], 'DataclassType'], 'DataclassType']:\n \"\"\"\n Like the python standard lib dataclasses but with type validation.\n\n Arguments are the same as for standard dataclasses, except for validate_assignment which has the same meaning\n as Config.validate_assignment.\n \"\"\"\n\n def wrap(cls: AnyType) -> 'DataclassType':\n return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, config)\n\n if _cls is None:\n return wrap\n\n return wrap(_cls)\n", "path": "pydantic/dataclasses.py"}], "after_files": [{"content": "import dataclasses\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Generator, Optional, Type, Union\n\nfrom .class_validators import gather_validators\nfrom .error_wrappers import ValidationError\nfrom .errors import DataclassTypeError\nfrom .fields import Required\nfrom .main import create_model, validate_model\nfrom .utils import AnyType\n\nif TYPE_CHECKING: # pragma: no cover\n from .main import BaseConfig, BaseModel # noqa: F401\n\n class DataclassType:\n __pydantic_model__: Type[BaseModel]\n __initialised__: bool\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n pass\n\n @classmethod\n def __validate__(cls, v: Any) -> 'DataclassType':\n pass\n\n\ndef _validate_dataclass(cls: Type['DataclassType'], v: Any) -> 'DataclassType':\n if isinstance(v, cls):\n return v\n elif isinstance(v, (list, tuple)):\n return cls(*v)\n elif isinstance(v, dict):\n return cls(**v)\n else:\n raise DataclassTypeError(class_name=cls.__name__)\n\n\ndef _get_validators(cls: Type['DataclassType']) -> Generator[Any, None, None]:\n yield cls.__validate__\n\n\ndef setattr_validate_assignment(self: 'DataclassType', name: str, value: Any) -> None:\n if self.__initialised__:\n d = dict(self.__dict__)\n d.pop(name, None)\n known_field = self.__pydantic_model__.__fields__.get(name, None)\n if known_field:\n value, error_ = known_field.validate(value, d, loc=name, cls=self.__class__)\n if error_:\n raise ValidationError([error_], type(self))\n\n object.__setattr__(self, name, value)\n\n\ndef _process_class(\n _cls: AnyType,\n init: bool,\n repr: bool,\n eq: bool,\n order: bool,\n unsafe_hash: bool,\n frozen: bool,\n config: Type['BaseConfig'],\n) -> 'DataclassType':\n post_init_original = getattr(_cls, '__post_init__', None)\n if post_init_original and post_init_original.__name__ == '_pydantic_post_init':\n post_init_original = None\n if not post_init_original:\n post_init_original = getattr(_cls, '__post_init_original__', None)\n\n post_init_post_parse = getattr(_cls, '__post_init_post_parse__', None)\n\n def _pydantic_post_init(self: 'DataclassType', *initvars: Any) -> None:\n if post_init_original is not None:\n post_init_original(self, *initvars)\n d = validate_model(self.__pydantic_model__, self.__dict__, cls=self.__class__)[0]\n object.__setattr__(self, '__dict__', d)\n object.__setattr__(self, '__initialised__', True)\n if post_init_post_parse is not None:\n post_init_post_parse(self)\n\n _cls.__post_init__ = _pydantic_post_init\n cls = dataclasses._process_class(_cls, init, repr, eq, order, unsafe_hash, frozen) # type: ignore\n\n fields: Dict[str, Any] = {\n field.name: (field.type, field.default if field.default != dataclasses.MISSING else Required)\n for field in dataclasses.fields(cls)\n }\n\n validators = gather_validators(cls)\n cls.__pydantic_model__ = create_model(\n cls.__name__, __config__=config, __module__=_cls.__module__, 
__validators__=validators, **fields\n )\n\n cls.__initialised__ = False\n cls.__validate__ = classmethod(_validate_dataclass)\n cls.__get_validators__ = classmethod(_get_validators)\n if post_init_original:\n cls.__post_init_original__ = post_init_original\n\n if cls.__pydantic_model__.__config__.validate_assignment and not frozen:\n cls.__setattr__ = setattr_validate_assignment\n\n return cls\n\n\nif TYPE_CHECKING: # pragma: no cover\n # see https://github.com/python/mypy/issues/6239 for explanation of why we do this\n from dataclasses import dataclass\nelse:\n\n def dataclass(\n _cls: Optional[AnyType] = None,\n *,\n init: bool = True,\n repr: bool = True,\n eq: bool = True,\n order: bool = False,\n unsafe_hash: bool = False,\n frozen: bool = False,\n config: Type['BaseConfig'] = None,\n ) -> Union[Callable[[AnyType], 'DataclassType'], 'DataclassType']:\n \"\"\"\n Like the python standard lib dataclasses but with type validation.\n\n Arguments are the same as for standard dataclasses, except for validate_assignment which has the same meaning\n as Config.validate_assignment.\n \"\"\"\n\n def wrap(cls: AnyType) -> 'DataclassType':\n return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, config)\n\n if _cls is None:\n return wrap\n\n return wrap(_cls)\n", "path": "pydantic/dataclasses.py"}]}
1,969
300
gh_patches_debug_34011
rasdani/github-patches
git_diff
statsmodels__statsmodels-5203
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- web._generate_url returns ValueError--> intentional? ``` return ValueError('Input not understood') ``` seems like it should be `raise` instead of `return`. Am I missing something? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `statsmodels/tools/web.py` Content: ``` 1 """ 2 Provides a function to open the system browser to either search or go directly 3 to a function's reference 4 """ 5 import webbrowser 6 7 from statsmodels.compat.python import urlencode 8 from statsmodels import __version__ 9 10 BASE_URL = 'https://www.statsmodels.org/' 11 12 13 def _generate_url(arg, stable): 14 """ 15 Parse inputs and return a correctly formatted URL or an error if the input 16 is not understandable 17 """ 18 url = BASE_URL 19 if stable: 20 url += 'stable/' 21 else: 22 url += 'devel/' 23 24 if arg is None: 25 return url 26 elif type(arg) is str: 27 url += 'search.html?' 28 url += urlencode({'q': arg}) 29 url += '&check_keywords=yes&area=default' 30 else: 31 try: 32 func = arg 33 func_name = func.__name__ 34 func_module = func.__module__ 35 if not func_module.startswith('statsmodels.'): 36 return ValueError('Function must be from statsmodels') 37 url += 'generated/' 38 url += func_module + '.' + func_name + '.html' 39 except: 40 return ValueError('Input not understood') 41 return url 42 43 44 def webdoc(arg=None, stable=None): 45 """ 46 Opens a browser and displays online documentation 47 48 Parameters 49 ---------- 50 arg, optional : string or statsmodels function 51 Either a string to search the documentation or a function 52 stable, optional : bool 53 Flag indicating whether to use the stable documentation (True) or 54 the development documentation (False). If not provided, opens 55 the stable documentation if the current version of statsmodels is a 56 release 57 58 Examples 59 -------- 60 >>> import statsmodels.api as sm 61 >>> sm.webdoc() # Documention site 62 >>> sm.webdoc('glm') # Search for glm in docs 63 >>> sm.webdoc(sm.OLS, stable=False) # Go to generated help for OLS, devel 64 65 Notes 66 ----- 67 By default, open stable documentation if the current version of statsmodels 68 is a release. Otherwise opens the development documentation. 69 70 Uses the default system browser. 71 """ 72 stable = __version__ if 'dev' not in __version__ else stable 73 url_or_error = _generate_url(arg, stable) 74 if isinstance(url_or_error, ValueError): 75 raise url_or_error 76 webbrowser.open(url_or_error) 77 return None 78 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/statsmodels/tools/web.py b/statsmodels/tools/web.py --- a/statsmodels/tools/web.py +++ b/statsmodels/tools/web.py @@ -4,7 +4,7 @@ """ import webbrowser -from statsmodels.compat.python import urlencode +from statsmodels.compat.python import urlencode, string_types from statsmodels import __version__ BASE_URL = 'https://www.statsmodels.org/' @@ -12,8 +12,8 @@ def _generate_url(arg, stable): """ - Parse inputs and return a correctly formatted URL or an error if the input - is not understandable + Parse inputs and return a correctly formatted URL or raises ValueError + if the input is not understandable """ url = BASE_URL if stable: @@ -23,7 +23,7 @@ if arg is None: return url - elif type(arg) is str: + elif isinstance(arg, string_types): url += 'search.html?' url += urlencode({'q': arg}) url += '&check_keywords=yes&area=default' @@ -33,11 +33,11 @@ func_name = func.__name__ func_module = func.__module__ if not func_module.startswith('statsmodels.'): - return ValueError('Function must be from statsmodels') + raise ValueError('Function must be from statsmodels') url += 'generated/' url += func_module + '.' + func_name + '.html' - except: - return ValueError('Input not understood') + except AttributeError: + raise ValueError('Input not understood') return url @@ -71,7 +71,5 @@ """ stable = __version__ if 'dev' not in __version__ else stable url_or_error = _generate_url(arg, stable) - if isinstance(url_or_error, ValueError): - raise url_or_error webbrowser.open(url_or_error) return None
{"golden_diff": "diff --git a/statsmodels/tools/web.py b/statsmodels/tools/web.py\n--- a/statsmodels/tools/web.py\n+++ b/statsmodels/tools/web.py\n@@ -4,7 +4,7 @@\n \"\"\"\n import webbrowser\n \n-from statsmodels.compat.python import urlencode\n+from statsmodels.compat.python import urlencode, string_types\n from statsmodels import __version__\n \n BASE_URL = 'https://www.statsmodels.org/'\n@@ -12,8 +12,8 @@\n \n def _generate_url(arg, stable):\n \"\"\"\n- Parse inputs and return a correctly formatted URL or an error if the input\n- is not understandable\n+ Parse inputs and return a correctly formatted URL or raises ValueError\n+ if the input is not understandable\n \"\"\"\n url = BASE_URL\n if stable:\n@@ -23,7 +23,7 @@\n \n if arg is None:\n return url\n- elif type(arg) is str:\n+ elif isinstance(arg, string_types):\n url += 'search.html?'\n url += urlencode({'q': arg})\n url += '&check_keywords=yes&area=default'\n@@ -33,11 +33,11 @@\n func_name = func.__name__\n func_module = func.__module__\n if not func_module.startswith('statsmodels.'):\n- return ValueError('Function must be from statsmodels')\n+ raise ValueError('Function must be from statsmodels')\n url += 'generated/'\n url += func_module + '.' + func_name + '.html'\n- except:\n- return ValueError('Input not understood')\n+ except AttributeError:\n+ raise ValueError('Input not understood')\n return url\n \n \n@@ -71,7 +71,5 @@\n \"\"\"\n stable = __version__ if 'dev' not in __version__ else stable\n url_or_error = _generate_url(arg, stable)\n- if isinstance(url_or_error, ValueError):\n- raise url_or_error\n webbrowser.open(url_or_error)\n return None\n", "issue": "web._generate_url returns ValueError--> intentional?\n```\r\n return ValueError('Input not understood')\r\n```\r\nseems like it should be `raise` instead of `return`. Am I missing something?\n", "before_files": [{"content": "\"\"\"\nProvides a function to open the system browser to either search or go directly\nto a function's reference\n\"\"\"\nimport webbrowser\n\nfrom statsmodels.compat.python import urlencode\nfrom statsmodels import __version__\n\nBASE_URL = 'https://www.statsmodels.org/'\n\n\ndef _generate_url(arg, stable):\n \"\"\"\n Parse inputs and return a correctly formatted URL or an error if the input\n is not understandable\n \"\"\"\n url = BASE_URL\n if stable:\n url += 'stable/'\n else:\n url += 'devel/'\n\n if arg is None:\n return url\n elif type(arg) is str:\n url += 'search.html?'\n url += urlencode({'q': arg})\n url += '&check_keywords=yes&area=default'\n else:\n try:\n func = arg\n func_name = func.__name__\n func_module = func.__module__\n if not func_module.startswith('statsmodels.'):\n return ValueError('Function must be from statsmodels')\n url += 'generated/'\n url += func_module + '.' + func_name + '.html'\n except:\n return ValueError('Input not understood')\n return url\n\n\ndef webdoc(arg=None, stable=None):\n \"\"\"\n Opens a browser and displays online documentation\n\n Parameters\n ----------\n arg, optional : string or statsmodels function\n Either a string to search the documentation or a function\n stable, optional : bool\n Flag indicating whether to use the stable documentation (True) or\n the development documentation (False). 
If not provided, opens\n the stable documentation if the current version of statsmodels is a\n release\n\n Examples\n --------\n >>> import statsmodels.api as sm\n >>> sm.webdoc() # Documention site\n >>> sm.webdoc('glm') # Search for glm in docs\n >>> sm.webdoc(sm.OLS, stable=False) # Go to generated help for OLS, devel\n\n Notes\n -----\n By default, open stable documentation if the current version of statsmodels\n is a release. Otherwise opens the development documentation.\n\n Uses the default system browser.\n \"\"\"\n stable = __version__ if 'dev' not in __version__ else stable\n url_or_error = _generate_url(arg, stable)\n if isinstance(url_or_error, ValueError):\n raise url_or_error\n webbrowser.open(url_or_error)\n return None\n", "path": "statsmodels/tools/web.py"}], "after_files": [{"content": "\"\"\"\nProvides a function to open the system browser to either search or go directly\nto a function's reference\n\"\"\"\nimport webbrowser\n\nfrom statsmodels.compat.python import urlencode, string_types\nfrom statsmodels import __version__\n\nBASE_URL = 'https://www.statsmodels.org/'\n\n\ndef _generate_url(arg, stable):\n \"\"\"\n Parse inputs and return a correctly formatted URL or raises ValueError\n if the input is not understandable\n \"\"\"\n url = BASE_URL\n if stable:\n url += 'stable/'\n else:\n url += 'devel/'\n\n if arg is None:\n return url\n elif isinstance(arg, string_types):\n url += 'search.html?'\n url += urlencode({'q': arg})\n url += '&check_keywords=yes&area=default'\n else:\n try:\n func = arg\n func_name = func.__name__\n func_module = func.__module__\n if not func_module.startswith('statsmodels.'):\n raise ValueError('Function must be from statsmodels')\n url += 'generated/'\n url += func_module + '.' + func_name + '.html'\n except AttributeError:\n raise ValueError('Input not understood')\n return url\n\n\ndef webdoc(arg=None, stable=None):\n \"\"\"\n Opens a browser and displays online documentation\n\n Parameters\n ----------\n arg, optional : string or statsmodels function\n Either a string to search the documentation or a function\n stable, optional : bool\n Flag indicating whether to use the stable documentation (True) or\n the development documentation (False). If not provided, opens\n the stable documentation if the current version of statsmodels is a\n release\n\n Examples\n --------\n >>> import statsmodels.api as sm\n >>> sm.webdoc() # Documention site\n >>> sm.webdoc('glm') # Search for glm in docs\n >>> sm.webdoc(sm.OLS, stable=False) # Go to generated help for OLS, devel\n\n Notes\n -----\n By default, open stable documentation if the current version of statsmodels\n is a release. Otherwise opens the development documentation.\n\n Uses the default system browser.\n \"\"\"\n stable = __version__ if 'dev' not in __version__ else stable\n url_or_error = _generate_url(arg, stable)\n webbrowser.open(url_or_error)\n return None\n", "path": "statsmodels/tools/web.py"}]}
964
426
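The statsmodels row above fixes three related Python mistakes: a ValueError was returned instead of raised, strings were detected with `type(arg) is str`, and a bare `except:` hid the real failure. A small stand-alone sketch of the corrected pattern (hypothetical function name and URL, not statsmodels code) might look like:

```python
from urllib.parse import urlencode


def build_search_url(query, base="https://example.invalid/docs/"):
    """Build a documentation search URL, raising (not returning) on bad input."""
    if not isinstance(query, str):        # isinstance check instead of `type(x) is str`
        raise ValueError("Input not understood")
    return base + "search.html?" + urlencode({"q": query})


def open_docs(query):
    # Because errors are raised, the caller no longer needs an
    # `isinstance(result, ValueError)` test before using the result.
    url = build_search_url(query)
    print(url)


open_docs("glm")            # prints a well-formed search URL
try:
    open_docs(123)          # non-string input now fails loudly instead of silently
except ValueError as exc:
    print("rejected:", exc)
```

Raising instead of returning is what lets the caller drop its `isinstance(url_or_error, ValueError)` check, which is exactly what the diff does in `webdoc`.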
gh_patches_debug_21230
rasdani/github-patches
git_diff
ckan__ckan-6008
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Flask-Babel does not translate zh_TW and zh_CN ### CKAN Version if known (or site URL) ≧ 2.8.0 ### Please describe the expected behaviour When switching to ``zh_TW`` or ``zh_CN`` languages on the pages written in Flask (ex. ``/`` and ``/user``), the pages should be shown in corresponding languages. ### Please describe the actual behaviour It shows English instead of ``zh_TW`` or ``zh_CN``. ### What steps can be taken to reproduce the issue? Switch language to Chinese on the above-mentioned pages. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ckan/views/home.py` Content: ``` 1 # encoding: utf-8 2 3 from flask import Blueprint, abort 4 5 import ckan.model as model 6 import ckan.logic as logic 7 import ckan.lib.base as base 8 import ckan.lib.search as search 9 import ckan.lib.helpers as h 10 11 from ckan.common import g, config, _ 12 13 CACHE_PARAMETERS = [u'__cache', u'__no_cache__'] 14 15 16 home = Blueprint(u'home', __name__) 17 18 19 @home.before_request 20 def before_request(): 21 u'''set context and check authorization''' 22 try: 23 context = { 24 u'model': model, 25 u'user': g.user, 26 u'auth_user_obj': g.userobj} 27 logic.check_access(u'site_read', context) 28 except logic.NotAuthorized: 29 abort(403) 30 31 32 def index(): 33 u'''display home page''' 34 try: 35 context = {u'model': model, u'session': model.Session, 36 u'user': g.user, u'auth_user_obj': g.userobj} 37 data_dict = {u'q': u'*:*', 38 u'facet.field': h.facets(), 39 u'rows': 4, 40 u'start': 0, 41 u'sort': u'view_recent desc', 42 u'fq': u'capacity:"public"'} 43 query = logic.get_action(u'package_search')(context, data_dict) 44 g.search_facets = query['search_facets'] 45 g.package_count = query['count'] 46 g.datasets = query['results'] 47 48 org_label = h.humanize_entity_type( 49 u'organization', 50 h.default_group_type(u'organization'), 51 u'facet label') or _(u'Organizations') 52 53 group_label = h.humanize_entity_type( 54 u'group', 55 h.default_group_type(u'group'), 56 u'facet label') or _(u'Groups') 57 58 g.facet_titles = { 59 u'organization': org_label, 60 u'groups': group_label, 61 u'tags': _(u'Tags'), 62 u'res_format': _(u'Formats'), 63 u'license': _(u'Licenses'), 64 } 65 66 except search.SearchError: 67 g.package_count = 0 68 69 if g.userobj and not g.userobj.email: 70 url = h.url_for(controller=u'user', action=u'edit') 71 msg = _(u'Please <a href="%s">update your profile</a>' 72 u' and add your email address. ') % url + \ 73 _(u'%s uses your email address' 74 u' if you need to reset your password.') \ 75 % config.get(u'ckan.site_title') 76 h.flash_notice(msg, allow_html=True) 77 return base.render(u'home/index.html', extra_vars={}) 78 79 80 def about(): 81 u''' display about page''' 82 return base.render(u'home/about.html', extra_vars={}) 83 84 85 util_rules = [ 86 (u'/', index), 87 (u'/about', about) 88 ] 89 for rule, view_func in util_rules: 90 home.add_url_rule(rule, view_func=view_func) 91 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ckan/views/home.py b/ckan/views/home.py --- a/ckan/views/home.py +++ b/ckan/views/home.py @@ -1,6 +1,6 @@ # encoding: utf-8 -from flask import Blueprint, abort +from flask import Blueprint, abort, redirect import ckan.model as model import ckan.logic as logic @@ -82,9 +82,36 @@ return base.render(u'home/about.html', extra_vars={}) +def redirect_locale(target_locale, path=None): + target = f'/{target_locale}/{path}' if path else f'/{target_locale}' + return redirect(target, code=308) + + util_rules = [ (u'/', index), (u'/about', about) ] for rule, view_func in util_rules: home.add_url_rule(rule, view_func=view_func) + +locales_mapping = [ + ('zh_TW', 'zh_Hant_TW'), + ('zh_CN', 'zh_Hans_CN'), +] + +for locale in locales_mapping: + + legacy_locale = locale[0] + new_locale = locale[1] + + home.add_url_rule( + f'/{legacy_locale}/', + view_func=redirect_locale, + defaults={'target_locale': new_locale} + ) + + home.add_url_rule( + f'/{legacy_locale}/<path:path>', + view_func=redirect_locale, + defaults={'target_locale': new_locale} + )
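The diff above works around the Flask-Babel locale naming by adding permanent (HTTP 308) redirects from the legacy `zh_TW`/`zh_CN` URL prefixes to the renamed `zh_Hant_TW`/`zh_Hans_CN` locales. A minimal, self-contained sketch of that redirect pattern (requires Flask installed; endpoint names and the example path are made up, this is not the actual CKAN blueprint) could be:

```python
from flask import Blueprint, Flask, redirect

home = Blueprint("home", __name__)


def redirect_locale(target_locale, path=None):
    # 308 is a permanent redirect that also preserves the request method.
    target = f"/{target_locale}/{path}" if path else f"/{target_locale}"
    return redirect(target, code=308)


# Map each legacy locale prefix onto the renamed locale.
for legacy, renamed in [("zh_TW", "zh_Hant_TW"), ("zh_CN", "zh_Hans_CN")]:
    home.add_url_rule(f"/{legacy}/", view_func=redirect_locale,
                      defaults={"target_locale": renamed},
                      endpoint=f"legacy_{legacy}")
    home.add_url_rule(f"/{legacy}/<path:path>", view_func=redirect_locale,
                      defaults={"target_locale": renamed},
                      endpoint=f"legacy_{legacy}_path")

app = Flask(__name__)
app.register_blueprint(home)

with app.test_client() as client:
    resp = client.get("/zh_TW/dataset")
    # Expect a 308 whose Location ends in /zh_Hant_TW/dataset
    print(resp.status_code, resp.headers["Location"])
```

Using 308 rather than 301/302 means form posts made under the old prefix keep their method and body after the redirect.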
{"golden_diff": "diff --git a/ckan/views/home.py b/ckan/views/home.py\n--- a/ckan/views/home.py\n+++ b/ckan/views/home.py\n@@ -1,6 +1,6 @@\n # encoding: utf-8\n \n-from flask import Blueprint, abort\n+from flask import Blueprint, abort, redirect\n \n import ckan.model as model\n import ckan.logic as logic\n@@ -82,9 +82,36 @@\n return base.render(u'home/about.html', extra_vars={})\n \n \n+def redirect_locale(target_locale, path=None):\n+ target = f'/{target_locale}/{path}' if path else f'/{target_locale}'\n+ return redirect(target, code=308)\n+\n+\n util_rules = [\n (u'/', index),\n (u'/about', about)\n ]\n for rule, view_func in util_rules:\n home.add_url_rule(rule, view_func=view_func)\n+\n+locales_mapping = [\n+ ('zh_TW', 'zh_Hant_TW'),\n+ ('zh_CN', 'zh_Hans_CN'),\n+]\n+\n+for locale in locales_mapping:\n+\n+ legacy_locale = locale[0]\n+ new_locale = locale[1]\n+\n+ home.add_url_rule(\n+ f'/{legacy_locale}/',\n+ view_func=redirect_locale,\n+ defaults={'target_locale': new_locale}\n+ )\n+\n+ home.add_url_rule(\n+ f'/{legacy_locale}/<path:path>',\n+ view_func=redirect_locale,\n+ defaults={'target_locale': new_locale}\n+ )\n", "issue": "Flask-Babel does not translate zh_TW and zh_CN\n### CKAN Version if known (or site URL)\r\n\r\n\u2267 2.8.0\r\n\r\n### Please describe the expected behaviour\r\n\r\nWhen switching to ``zh_TW`` or ``zh_CN`` languages on the pages written in Flask (ex. ``/`` and ``/user``), the pages should be shown in corresponding languages.\r\n\r\n### Please describe the actual behaviour\r\n\r\nIt shows English instead of ``zh_TW`` or ``zh_CN``.\r\n\r\n### What steps can be taken to reproduce the issue? \r\n\r\nSwitch language to Chinese on the above-mentioned pages.\n", "before_files": [{"content": "# encoding: utf-8\n\nfrom flask import Blueprint, abort\n\nimport ckan.model as model\nimport ckan.logic as logic\nimport ckan.lib.base as base\nimport ckan.lib.search as search\nimport ckan.lib.helpers as h\n\nfrom ckan.common import g, config, _\n\nCACHE_PARAMETERS = [u'__cache', u'__no_cache__']\n\n\nhome = Blueprint(u'home', __name__)\n\n\n@home.before_request\ndef before_request():\n u'''set context and check authorization'''\n try:\n context = {\n u'model': model,\n u'user': g.user,\n u'auth_user_obj': g.userobj}\n logic.check_access(u'site_read', context)\n except logic.NotAuthorized:\n abort(403)\n\n\ndef index():\n u'''display home page'''\n try:\n context = {u'model': model, u'session': model.Session,\n u'user': g.user, u'auth_user_obj': g.userobj}\n data_dict = {u'q': u'*:*',\n u'facet.field': h.facets(),\n u'rows': 4,\n u'start': 0,\n u'sort': u'view_recent desc',\n u'fq': u'capacity:\"public\"'}\n query = logic.get_action(u'package_search')(context, data_dict)\n g.search_facets = query['search_facets']\n g.package_count = query['count']\n g.datasets = query['results']\n\n org_label = h.humanize_entity_type(\n u'organization',\n h.default_group_type(u'organization'),\n u'facet label') or _(u'Organizations')\n\n group_label = h.humanize_entity_type(\n u'group',\n h.default_group_type(u'group'),\n u'facet label') or _(u'Groups')\n\n g.facet_titles = {\n u'organization': org_label,\n u'groups': group_label,\n u'tags': _(u'Tags'),\n u'res_format': _(u'Formats'),\n u'license': _(u'Licenses'),\n }\n\n except search.SearchError:\n g.package_count = 0\n\n if g.userobj and not g.userobj.email:\n url = h.url_for(controller=u'user', action=u'edit')\n msg = _(u'Please <a href=\"%s\">update your profile</a>'\n u' and add your email address. 
') % url + \\\n _(u'%s uses your email address'\n u' if you need to reset your password.') \\\n % config.get(u'ckan.site_title')\n h.flash_notice(msg, allow_html=True)\n return base.render(u'home/index.html', extra_vars={})\n\n\ndef about():\n u''' display about page'''\n return base.render(u'home/about.html', extra_vars={})\n\n\nutil_rules = [\n (u'/', index),\n (u'/about', about)\n]\nfor rule, view_func in util_rules:\n home.add_url_rule(rule, view_func=view_func)\n", "path": "ckan/views/home.py"}], "after_files": [{"content": "# encoding: utf-8\n\nfrom flask import Blueprint, abort, redirect\n\nimport ckan.model as model\nimport ckan.logic as logic\nimport ckan.lib.base as base\nimport ckan.lib.search as search\nimport ckan.lib.helpers as h\n\nfrom ckan.common import g, config, _\n\nCACHE_PARAMETERS = [u'__cache', u'__no_cache__']\n\n\nhome = Blueprint(u'home', __name__)\n\n\n@home.before_request\ndef before_request():\n u'''set context and check authorization'''\n try:\n context = {\n u'model': model,\n u'user': g.user,\n u'auth_user_obj': g.userobj}\n logic.check_access(u'site_read', context)\n except logic.NotAuthorized:\n abort(403)\n\n\ndef index():\n u'''display home page'''\n try:\n context = {u'model': model, u'session': model.Session,\n u'user': g.user, u'auth_user_obj': g.userobj}\n data_dict = {u'q': u'*:*',\n u'facet.field': h.facets(),\n u'rows': 4,\n u'start': 0,\n u'sort': u'view_recent desc',\n u'fq': u'capacity:\"public\"'}\n query = logic.get_action(u'package_search')(context, data_dict)\n g.search_facets = query['search_facets']\n g.package_count = query['count']\n g.datasets = query['results']\n\n org_label = h.humanize_entity_type(\n u'organization',\n h.default_group_type(u'organization'),\n u'facet label') or _(u'Organizations')\n\n group_label = h.humanize_entity_type(\n u'group',\n h.default_group_type(u'group'),\n u'facet label') or _(u'Groups')\n\n g.facet_titles = {\n u'organization': org_label,\n u'groups': group_label,\n u'tags': _(u'Tags'),\n u'res_format': _(u'Formats'),\n u'license': _(u'Licenses'),\n }\n\n except search.SearchError:\n g.package_count = 0\n\n if g.userobj and not g.userobj.email:\n url = h.url_for(controller=u'user', action=u'edit')\n msg = _(u'Please <a href=\"%s\">update your profile</a>'\n u' and add your email address. ') % url + \\\n _(u'%s uses your email address'\n u' if you need to reset your password.') \\\n % config.get(u'ckan.site_title')\n h.flash_notice(msg, allow_html=True)\n return base.render(u'home/index.html', extra_vars={})\n\n\ndef about():\n u''' display about page'''\n return base.render(u'home/about.html', extra_vars={})\n\n\ndef redirect_locale(target_locale, path=None):\n target = f'/{target_locale}/{path}' if path else f'/{target_locale}'\n return redirect(target, code=308)\n\n\nutil_rules = [\n (u'/', index),\n (u'/about', about)\n]\nfor rule, view_func in util_rules:\n home.add_url_rule(rule, view_func=view_func)\n\nlocales_mapping = [\n ('zh_TW', 'zh_Hant_TW'),\n ('zh_CN', 'zh_Hans_CN'),\n]\n\nfor locale in locales_mapping:\n\n legacy_locale = locale[0]\n new_locale = locale[1]\n\n home.add_url_rule(\n f'/{legacy_locale}/',\n view_func=redirect_locale,\n defaults={'target_locale': new_locale}\n )\n\n home.add_url_rule(\n f'/{legacy_locale}/<path:path>',\n view_func=redirect_locale,\n defaults={'target_locale': new_locale}\n )\n", "path": "ckan/views/home.py"}]}
1,220
336
gh_patches_debug_6079
rasdani/github-patches
git_diff
carpentries__amy-2324
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bug: Internal Server Error: /fiscal/organizations/ Production issue. ``` FieldError at /fiscal/organizations/ Cannot resolve keyword 'membership' into field. Choices are: administered_events, affiliated_organizations, country, domain, fullname, hosted_events, id, latitude, longitude, member, memberships, selforganisedsubmission, sponsored_events, workshopinquiryrequest, workshoprequest ``` https://amy.carpentries.org/fiscal/organizations/?country=&membership__variant=silver&order_by= --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `amy/fiscal/filters.py` Content: ``` 1 from datetime import date 2 3 from django.forms import widgets 4 import django_filters 5 6 from workshops.fields import Select2MultipleWidget, Select2Widget 7 from workshops.filters import AllCountriesFilter, AMYFilterSet 8 from workshops.models import Membership, Organization 9 10 11 class OrganizationFilter(AMYFilterSet): 12 country = AllCountriesFilter(widget=Select2Widget) 13 14 membership__variant = django_filters.MultipleChoiceFilter( 15 label="Memberships (current or past)", 16 choices=Membership.MEMBERSHIP_CHOICES, 17 widget=Select2MultipleWidget, 18 ) 19 20 order_by = django_filters.OrderingFilter( 21 fields=( 22 "fullname", 23 "domain", 24 ), 25 ) 26 27 class Meta: 28 model = Organization 29 fields = [ 30 "country", 31 ] 32 33 34 def filter_active_memberships_only(queryset, name, active): 35 """Limit Memberships to only active entries.""" 36 if active: 37 today = date.today() 38 return queryset.filter(agreement_start__lte=today, agreement_end__gte=today) 39 else: 40 return queryset 41 42 43 def filter_training_seats_only(queryset, name, seats): 44 """Limit Memberships to only entries with some training seats allowed.""" 45 if seats: 46 return queryset.filter(instructor_training_seats_total__gt=0) 47 else: 48 return queryset 49 50 51 def filter_nonpositive_remaining_seats(queryset, name, seats): 52 """Limit Memberships to only entries with negative remaining seats.""" 53 if seats: 54 return queryset.filter(instructor_training_seats_remaining__lt=0) 55 else: 56 return queryset 57 58 59 class MembershipFilter(AMYFilterSet): 60 organization_name = django_filters.CharFilter( 61 label="Organisation name", 62 field_name="organizations__fullname", 63 lookup_expr="icontains", 64 ) 65 66 MEMBERSHIP_CHOICES = (("", "Any"),) + Membership.MEMBERSHIP_CHOICES 67 variant = django_filters.ChoiceFilter(choices=MEMBERSHIP_CHOICES) 68 69 CONTRIBUTION_CHOICES = (("", "Any"),) + Membership.CONTRIBUTION_CHOICES 70 contribution_type = django_filters.ChoiceFilter(choices=CONTRIBUTION_CHOICES) 71 72 active_only = django_filters.BooleanFilter( 73 label="Only show active memberships", 74 method=filter_active_memberships_only, 75 widget=widgets.CheckboxInput, 76 ) 77 78 training_seats_only = django_filters.BooleanFilter( 79 label="Only show memberships with non-zero allowed training seats", 80 method=filter_training_seats_only, 81 widget=widgets.CheckboxInput, 82 ) 83 84 nonpositive_remaining_seats_only = django_filters.BooleanFilter( 85 label="Only show memberships with zero or less remaining seats", 86 method=filter_nonpositive_remaining_seats, 87 widget=widgets.CheckboxInput, 88 ) 89 90 order_by = django_filters.OrderingFilter( 91 fields=( 92 "agreement_start", 93 "agreement_end", 94 "instructor_training_seats_remaining", 95 ), 96 ) 
97 98 class Meta: 99 model = Membership 100 fields = [ 101 "organization_name", 102 "consortium", 103 "public_status", 104 "variant", 105 "contribution_type", 106 ] 107 108 109 class MembershipTrainingsFilter(AMYFilterSet): 110 organization_name = django_filters.CharFilter( 111 label="Organization name", 112 field_name="organization__fullname", 113 lookup_expr="icontains", 114 ) 115 116 active_only = django_filters.BooleanFilter( 117 label="Only show active memberships", 118 method=filter_active_memberships_only, 119 widget=widgets.CheckboxInput, 120 ) 121 122 training_seats_only = django_filters.BooleanFilter( 123 label="Only show memberships with non-zero allowed training seats", 124 method=filter_training_seats_only, 125 widget=widgets.CheckboxInput, 126 ) 127 128 nonpositive_remaining_seats_only = django_filters.BooleanFilter( 129 label="Only show memberships with zero or less remaining seats", 130 method=filter_nonpositive_remaining_seats, 131 widget=widgets.CheckboxInput, 132 ) 133 134 order_by = django_filters.OrderingFilter( 135 fields=( 136 "organization__fullname", 137 "organization__domain", 138 "agreement_start", 139 "agreement_end", 140 "instructor_training_seats_total", 141 "instructor_training_seats_utilized", 142 "instructor_training_seats_remaining", 143 ), 144 ) 145 146 class Meta: 147 model = Membership 148 fields = [ 149 "organization_name", 150 ] 151 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/amy/fiscal/filters.py b/amy/fiscal/filters.py --- a/amy/fiscal/filters.py +++ b/amy/fiscal/filters.py @@ -11,7 +11,7 @@ class OrganizationFilter(AMYFilterSet): country = AllCountriesFilter(widget=Select2Widget) - membership__variant = django_filters.MultipleChoiceFilter( + memberships__variant = django_filters.MultipleChoiceFilter( label="Memberships (current or past)", choices=Membership.MEMBERSHIP_CHOICES, widget=Select2MultipleWidget,
{"golden_diff": "diff --git a/amy/fiscal/filters.py b/amy/fiscal/filters.py\n--- a/amy/fiscal/filters.py\n+++ b/amy/fiscal/filters.py\n@@ -11,7 +11,7 @@\n class OrganizationFilter(AMYFilterSet):\n country = AllCountriesFilter(widget=Select2Widget)\n \n- membership__variant = django_filters.MultipleChoiceFilter(\n+ memberships__variant = django_filters.MultipleChoiceFilter(\n label=\"Memberships (current or past)\",\n choices=Membership.MEMBERSHIP_CHOICES,\n widget=Select2MultipleWidget,\n", "issue": "Bug: Internal Server Error: /fiscal/organizations/\nProduction issue.\r\n\r\n```\r\nFieldError at /fiscal/organizations/\r\nCannot resolve keyword 'membership' into field. Choices are: administered_events, affiliated_organizations, country, domain, fullname, hosted_events, id, latitude, longitude, member, memberships, selforganisedsubmission, sponsored_events, workshopinquiryrequest, workshoprequest\r\n```\r\n\r\nhttps://amy.carpentries.org/fiscal/organizations/?country=&membership__variant=silver&order_by=\n", "before_files": [{"content": "from datetime import date\n\nfrom django.forms import widgets\nimport django_filters\n\nfrom workshops.fields import Select2MultipleWidget, Select2Widget\nfrom workshops.filters import AllCountriesFilter, AMYFilterSet\nfrom workshops.models import Membership, Organization\n\n\nclass OrganizationFilter(AMYFilterSet):\n country = AllCountriesFilter(widget=Select2Widget)\n\n membership__variant = django_filters.MultipleChoiceFilter(\n label=\"Memberships (current or past)\",\n choices=Membership.MEMBERSHIP_CHOICES,\n widget=Select2MultipleWidget,\n )\n\n order_by = django_filters.OrderingFilter(\n fields=(\n \"fullname\",\n \"domain\",\n ),\n )\n\n class Meta:\n model = Organization\n fields = [\n \"country\",\n ]\n\n\ndef filter_active_memberships_only(queryset, name, active):\n \"\"\"Limit Memberships to only active entries.\"\"\"\n if active:\n today = date.today()\n return queryset.filter(agreement_start__lte=today, agreement_end__gte=today)\n else:\n return queryset\n\n\ndef filter_training_seats_only(queryset, name, seats):\n \"\"\"Limit Memberships to only entries with some training seats allowed.\"\"\"\n if seats:\n return queryset.filter(instructor_training_seats_total__gt=0)\n else:\n return queryset\n\n\ndef filter_nonpositive_remaining_seats(queryset, name, seats):\n \"\"\"Limit Memberships to only entries with negative remaining seats.\"\"\"\n if seats:\n return queryset.filter(instructor_training_seats_remaining__lt=0)\n else:\n return queryset\n\n\nclass MembershipFilter(AMYFilterSet):\n organization_name = django_filters.CharFilter(\n label=\"Organisation name\",\n field_name=\"organizations__fullname\",\n lookup_expr=\"icontains\",\n )\n\n MEMBERSHIP_CHOICES = ((\"\", \"Any\"),) + Membership.MEMBERSHIP_CHOICES\n variant = django_filters.ChoiceFilter(choices=MEMBERSHIP_CHOICES)\n\n CONTRIBUTION_CHOICES = ((\"\", \"Any\"),) + Membership.CONTRIBUTION_CHOICES\n contribution_type = django_filters.ChoiceFilter(choices=CONTRIBUTION_CHOICES)\n\n active_only = django_filters.BooleanFilter(\n label=\"Only show active memberships\",\n method=filter_active_memberships_only,\n widget=widgets.CheckboxInput,\n )\n\n training_seats_only = django_filters.BooleanFilter(\n label=\"Only show memberships with non-zero allowed training seats\",\n method=filter_training_seats_only,\n widget=widgets.CheckboxInput,\n )\n\n nonpositive_remaining_seats_only = django_filters.BooleanFilter(\n label=\"Only show memberships with zero or less remaining seats\",\n 
method=filter_nonpositive_remaining_seats,\n widget=widgets.CheckboxInput,\n )\n\n order_by = django_filters.OrderingFilter(\n fields=(\n \"agreement_start\",\n \"agreement_end\",\n \"instructor_training_seats_remaining\",\n ),\n )\n\n class Meta:\n model = Membership\n fields = [\n \"organization_name\",\n \"consortium\",\n \"public_status\",\n \"variant\",\n \"contribution_type\",\n ]\n\n\nclass MembershipTrainingsFilter(AMYFilterSet):\n organization_name = django_filters.CharFilter(\n label=\"Organization name\",\n field_name=\"organization__fullname\",\n lookup_expr=\"icontains\",\n )\n\n active_only = django_filters.BooleanFilter(\n label=\"Only show active memberships\",\n method=filter_active_memberships_only,\n widget=widgets.CheckboxInput,\n )\n\n training_seats_only = django_filters.BooleanFilter(\n label=\"Only show memberships with non-zero allowed training seats\",\n method=filter_training_seats_only,\n widget=widgets.CheckboxInput,\n )\n\n nonpositive_remaining_seats_only = django_filters.BooleanFilter(\n label=\"Only show memberships with zero or less remaining seats\",\n method=filter_nonpositive_remaining_seats,\n widget=widgets.CheckboxInput,\n )\n\n order_by = django_filters.OrderingFilter(\n fields=(\n \"organization__fullname\",\n \"organization__domain\",\n \"agreement_start\",\n \"agreement_end\",\n \"instructor_training_seats_total\",\n \"instructor_training_seats_utilized\",\n \"instructor_training_seats_remaining\",\n ),\n )\n\n class Meta:\n model = Membership\n fields = [\n \"organization_name\",\n ]\n", "path": "amy/fiscal/filters.py"}], "after_files": [{"content": "from datetime import date\n\nfrom django.forms import widgets\nimport django_filters\n\nfrom workshops.fields import Select2MultipleWidget, Select2Widget\nfrom workshops.filters import AllCountriesFilter, AMYFilterSet\nfrom workshops.models import Membership, Organization\n\n\nclass OrganizationFilter(AMYFilterSet):\n country = AllCountriesFilter(widget=Select2Widget)\n\n memberships__variant = django_filters.MultipleChoiceFilter(\n label=\"Memberships (current or past)\",\n choices=Membership.MEMBERSHIP_CHOICES,\n widget=Select2MultipleWidget,\n )\n\n order_by = django_filters.OrderingFilter(\n fields=(\n \"fullname\",\n \"domain\",\n ),\n )\n\n class Meta:\n model = Organization\n fields = [\n \"country\",\n ]\n\n\ndef filter_active_memberships_only(queryset, name, active):\n \"\"\"Limit Memberships to only active entries.\"\"\"\n if active:\n today = date.today()\n return queryset.filter(agreement_start__lte=today, agreement_end__gte=today)\n else:\n return queryset\n\n\ndef filter_training_seats_only(queryset, name, seats):\n \"\"\"Limit Memberships to only entries with some training seats allowed.\"\"\"\n if seats:\n return queryset.filter(instructor_training_seats_total__gt=0)\n else:\n return queryset\n\n\ndef filter_nonpositive_remaining_seats(queryset, name, seats):\n \"\"\"Limit Memberships to only entries with negative remaining seats.\"\"\"\n if seats:\n return queryset.filter(instructor_training_seats_remaining__lt=0)\n else:\n return queryset\n\n\nclass MembershipFilter(AMYFilterSet):\n organization_name = django_filters.CharFilter(\n label=\"Organisation name\",\n field_name=\"organizations__fullname\",\n lookup_expr=\"icontains\",\n )\n\n MEMBERSHIP_CHOICES = ((\"\", \"Any\"),) + Membership.MEMBERSHIP_CHOICES\n variant = django_filters.ChoiceFilter(choices=MEMBERSHIP_CHOICES)\n\n CONTRIBUTION_CHOICES = ((\"\", \"Any\"),) + Membership.CONTRIBUTION_CHOICES\n contribution_type = 
django_filters.ChoiceFilter(choices=CONTRIBUTION_CHOICES)\n\n active_only = django_filters.BooleanFilter(\n label=\"Only show active memberships\",\n method=filter_active_memberships_only,\n widget=widgets.CheckboxInput,\n )\n\n training_seats_only = django_filters.BooleanFilter(\n label=\"Only show memberships with non-zero allowed training seats\",\n method=filter_training_seats_only,\n widget=widgets.CheckboxInput,\n )\n\n nonpositive_remaining_seats_only = django_filters.BooleanFilter(\n label=\"Only show memberships with zero or less remaining seats\",\n method=filter_nonpositive_remaining_seats,\n widget=widgets.CheckboxInput,\n )\n\n order_by = django_filters.OrderingFilter(\n fields=(\n \"agreement_start\",\n \"agreement_end\",\n \"instructor_training_seats_remaining\",\n ),\n )\n\n class Meta:\n model = Membership\n fields = [\n \"organization_name\",\n \"consortium\",\n \"public_status\",\n \"variant\",\n \"contribution_type\",\n ]\n\n\nclass MembershipTrainingsFilter(AMYFilterSet):\n organization_name = django_filters.CharFilter(\n label=\"Organization name\",\n field_name=\"organization__fullname\",\n lookup_expr=\"icontains\",\n )\n\n active_only = django_filters.BooleanFilter(\n label=\"Only show active memberships\",\n method=filter_active_memberships_only,\n widget=widgets.CheckboxInput,\n )\n\n training_seats_only = django_filters.BooleanFilter(\n label=\"Only show memberships with non-zero allowed training seats\",\n method=filter_training_seats_only,\n widget=widgets.CheckboxInput,\n )\n\n nonpositive_remaining_seats_only = django_filters.BooleanFilter(\n label=\"Only show memberships with zero or less remaining seats\",\n method=filter_nonpositive_remaining_seats,\n widget=widgets.CheckboxInput,\n )\n\n order_by = django_filters.OrderingFilter(\n fields=(\n \"organization__fullname\",\n \"organization__domain\",\n \"agreement_start\",\n \"agreement_end\",\n \"instructor_training_seats_total\",\n \"instructor_training_seats_utilized\",\n \"instructor_training_seats_remaining\",\n ),\n )\n\n class Meta:\n model = Membership\n fields = [\n \"organization_name\",\n ]\n", "path": "amy/fiscal/filters.py"}]}
1629
127
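The AMY row is essentially a one-word fix: the filter has to traverse the model's actual reverse accessor (`memberships`), because django-filter turns the declared filter name into an ORM lookup path. A sketch of that idea with made-up models (it assumes a configured Django project with django-filter installed; the real patch only renamed the existing filter) might be:

```python
# Sketch only: assumes a configured Django project with django-filter installed.
import django_filters
from django.db import models


class Organization(models.Model):
    fullname = models.CharField(max_length=200)

    class Meta:
        app_label = "fiscal_demo"              # hypothetical app label


class Membership(models.Model):
    variant = models.CharField(max_length=40)
    organization = models.ForeignKey(
        Organization,
        on_delete=models.CASCADE,
        related_name="memberships",            # reverse accessor is *memberships*
    )

    class Meta:
        app_label = "fiscal_demo"


class OrganizationFilter(django_filters.FilterSet):
    # The filter's name doubles as the ORM lookup path, so it must start with
    # the reverse accessor declared above: memberships__variant, never
    # membership__variant (which is what raised the FieldError in the issue).
    memberships__variant = django_filters.CharFilter(lookup_expr="iexact")

    class Meta:
        model = Organization
        fields = []
```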
gh_patches_debug_14037
rasdani/github-patches
git_diff
voicepaw__so-vits-svc-fork-557
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- PermissionError: [Errno 13] Permission denied I'm on Windows 10 WSL2 Ubuntu and this happens when I try to run `svc pre-resample`, what's going on? Traceback (most recent call last): File "/home/fab/miniconda3/envs/sovits/bin/svc", line 5, in <module> from so_vits_svc_fork.__main__ import cli File "/home/fab/miniconda3/envs/sovits/lib/python3.10/site-packages/so_vits_svc_fork/__init__.py", line 5, in <module> init_logger() File "/home/fab/miniconda3/envs/sovits/lib/python3.10/site-packages/so_vits_svc_fork/logger.py", line 31, in init_logger FileHandler(f"{__name__.split('.')[0]}.log"), File "/home/fab/miniconda3/envs/sovits/lib/python3.10/logging/__init__.py", line 1169, in __init__ StreamHandler.__init__(self, self._open()) File "/home/fab/miniconda3/envs/sovits/lib/python3.10/logging/__init__.py", line 1201, in _open return open_func(self.baseFilename, self.mode, PermissionError: [Errno 13] Permission denied: '/home/fab/sovits/so_vits_svc_fork.log' --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/so_vits_svc_fork/logger.py` Content: ``` 1 import os 2 import sys 3 from logging import ( 4 DEBUG, 5 INFO, 6 FileHandler, 7 StreamHandler, 8 basicConfig, 9 captureWarnings, 10 getLogger, 11 ) 12 from pathlib import Path 13 14 from rich.logging import RichHandler 15 16 LOGGER_INIT = False 17 18 19 def init_logger() -> None: 20 global LOGGER_INIT 21 if LOGGER_INIT: 22 return 23 24 IS_TEST = "test" in Path.cwd().stem 25 package_name = sys.modules[__name__].__package__ 26 basicConfig( 27 level=INFO, 28 format="%(asctime)s %(message)s", 29 datefmt="[%X]", 30 handlers=[ 31 StreamHandler() if is_notebook() else RichHandler(), 32 FileHandler(f"{package_name}.log"), 33 ], 34 ) 35 if IS_TEST: 36 getLogger(package_name).setLevel(DEBUG) 37 captureWarnings(True) 38 LOGGER_INIT = True 39 40 41 def is_notebook(): 42 try: 43 from IPython import get_ipython 44 45 if "IPKernelApp" not in get_ipython().config: # pragma: no cover 46 raise ImportError("console") 47 return False 48 if "VSCODE_PID" in os.environ: # pragma: no cover 49 raise ImportError("vscode") 50 return False 51 except Exception: 52 return False 53 else: # pragma: no cover 54 return True 55 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/so_vits_svc_fork/logger.py b/src/so_vits_svc_fork/logger.py --- a/src/so_vits_svc_fork/logger.py +++ b/src/so_vits_svc_fork/logger.py @@ -1,14 +1,6 @@ import os import sys -from logging import ( - DEBUG, - INFO, - FileHandler, - StreamHandler, - basicConfig, - captureWarnings, - getLogger, -) +from logging import DEBUG, INFO, StreamHandler, basicConfig, captureWarnings, getLogger from pathlib import Path from rich.logging import RichHandler @@ -29,7 +21,7 @@ datefmt="[%X]", handlers=[ StreamHandler() if is_notebook() else RichHandler(), - FileHandler(f"{package_name}.log"), + # FileHandler(f"{package_name}.log"), ], ) if IS_TEST:
{"golden_diff": "diff --git a/src/so_vits_svc_fork/logger.py b/src/so_vits_svc_fork/logger.py\n--- a/src/so_vits_svc_fork/logger.py\n+++ b/src/so_vits_svc_fork/logger.py\n@@ -1,14 +1,6 @@\n import os\n import sys\n-from logging import (\n- DEBUG,\n- INFO,\n- FileHandler,\n- StreamHandler,\n- basicConfig,\n- captureWarnings,\n- getLogger,\n-)\n+from logging import DEBUG, INFO, StreamHandler, basicConfig, captureWarnings, getLogger\n from pathlib import Path\n \n from rich.logging import RichHandler\n@@ -29,7 +21,7 @@\n datefmt=\"[%X]\",\n handlers=[\n StreamHandler() if is_notebook() else RichHandler(),\n- FileHandler(f\"{package_name}.log\"),\n+ # FileHandler(f\"{package_name}.log\"),\n ],\n )\n if IS_TEST:\n", "issue": "PermissionError: [Errno 13] Permission denied\nI'm on Windows 10 WSL2 Ubuntu and this happens when I try to run `svc pre-resample`, what's going on?\r\n\r\nTraceback (most recent call last):\r\n File \"/home/fab/miniconda3/envs/sovits/bin/svc\", line 5, in <module>\r\n from so_vits_svc_fork.__main__ import cli\r\n File \"/home/fab/miniconda3/envs/sovits/lib/python3.10/site-packages/so_vits_svc_fork/__init__.py\", line 5, in <module>\r\n init_logger()\r\n File \"/home/fab/miniconda3/envs/sovits/lib/python3.10/site-packages/so_vits_svc_fork/logger.py\", line 31, in init_logger\r\n FileHandler(f\"{__name__.split('.')[0]}.log\"),\r\n File \"/home/fab/miniconda3/envs/sovits/lib/python3.10/logging/__init__.py\", line 1169, in __init__\r\n StreamHandler.__init__(self, self._open())\r\n File \"/home/fab/miniconda3/envs/sovits/lib/python3.10/logging/__init__.py\", line 1201, in _open\r\n return open_func(self.baseFilename, self.mode,\r\nPermissionError: [Errno 13] Permission denied: '/home/fab/sovits/so_vits_svc_fork.log'\n", "before_files": [{"content": "import os\nimport sys\nfrom logging import (\n DEBUG,\n INFO,\n FileHandler,\n StreamHandler,\n basicConfig,\n captureWarnings,\n getLogger,\n)\nfrom pathlib import Path\n\nfrom rich.logging import RichHandler\n\nLOGGER_INIT = False\n\n\ndef init_logger() -> None:\n global LOGGER_INIT\n if LOGGER_INIT:\n return\n\n IS_TEST = \"test\" in Path.cwd().stem\n package_name = sys.modules[__name__].__package__\n basicConfig(\n level=INFO,\n format=\"%(asctime)s %(message)s\",\n datefmt=\"[%X]\",\n handlers=[\n StreamHandler() if is_notebook() else RichHandler(),\n FileHandler(f\"{package_name}.log\"),\n ],\n )\n if IS_TEST:\n getLogger(package_name).setLevel(DEBUG)\n captureWarnings(True)\n LOGGER_INIT = True\n\n\ndef is_notebook():\n try:\n from IPython import get_ipython\n\n if \"IPKernelApp\" not in get_ipython().config: # pragma: no cover\n raise ImportError(\"console\")\n return False\n if \"VSCODE_PID\" in os.environ: # pragma: no cover\n raise ImportError(\"vscode\")\n return False\n except Exception:\n return False\n else: # pragma: no cover\n return True\n", "path": "src/so_vits_svc_fork/logger.py"}], "after_files": [{"content": "import os\nimport sys\nfrom logging import DEBUG, INFO, StreamHandler, basicConfig, captureWarnings, getLogger\nfrom pathlib import Path\n\nfrom rich.logging import RichHandler\n\nLOGGER_INIT = False\n\n\ndef init_logger() -> None:\n global LOGGER_INIT\n if LOGGER_INIT:\n return\n\n IS_TEST = \"test\" in Path.cwd().stem\n package_name = sys.modules[__name__].__package__\n basicConfig(\n level=INFO,\n format=\"%(asctime)s %(message)s\",\n datefmt=\"[%X]\",\n handlers=[\n StreamHandler() if is_notebook() else RichHandler(),\n # FileHandler(f\"{package_name}.log\"),\n ],\n )\n if IS_TEST:\n 
getLogger(package_name).setLevel(DEBUG)\n captureWarnings(True)\n LOGGER_INIT = True\n\n\ndef is_notebook():\n try:\n from IPython import get_ipython\n\n if \"IPKernelApp\" not in get_ipython().config: # pragma: no cover\n raise ImportError(\"console\")\n return False\n if \"VSCODE_PID\" in os.environ: # pragma: no cover\n raise ImportError(\"vscode\")\n return False\n except Exception:\n return False\n else: # pragma: no cover\n return True\n", "path": "src/so_vits_svc_fork/logger.py"}]}
979
205
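The so-vits-svc-fork row removes an unconditional `FileHandler` that tried to create a log file in whatever directory the user happened to be in, which is what raised the `PermissionError`. The upstream patch simply comments the handler out; a slightly more defensive variant (hypothetical, not the project's code) could look like:

```python
import logging
import os
import tempfile


def init_logger(log_name="app.log"):
    handlers = [logging.StreamHandler()]                 # always safe: stderr
    log_path = os.path.join(tempfile.gettempdir(), log_name)
    try:
        # Only attach a file handler if the file can actually be opened;
        # an unwritable working directory must not crash the whole program.
        handlers.append(logging.FileHandler(log_path))
    except OSError as exc:                               # PermissionError is an OSError
        print(f"file logging disabled: {exc}")
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(message)s",
        handlers=handlers,
    )


init_logger()
logging.getLogger(__name__).info("logger initialised without writing to the CWD")
```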
gh_patches_debug_469
rasdani/github-patches
git_diff
googleapis__google-api-python-client-293
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Switch from "uritemplate" to "uritemplate.py" There are at least 2 URI template packages on the PyPI: [uritemplate](https://pypi.python.org/pypi/uritemplate/0.6) and [uritemplate.py](https://pypi.python.org/pypi/uritemplate.py/0.3.0). Both of these packages use `uritemplate` as the package name to be imported, which causes a conflict when both are installed (see https://github.com/sigmavirus24/uritemplate/issues/14). I propose that this project depend on `uritemplate.py` because: - `uritemplate.py` is API-compatible with `uritemplate`, but the reverse is not true. I have confirmed that this library works with uritemplate.py. - Other projects depend on `uritemplate.py`. For example, we are currently working on a project that depends on both `github3.py` (GitHub API client) and `google-api-python-client`. Installing both results in an immediate `ImportError` due to the `uritemplate` conflict. This is a simple, low-risk change that would aid compatibility with other projects. Thoughts? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # Copyright 2014 Google Inc. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Setup script for Google API Python client. 16 17 Also installs included versions of third party libraries, if those libraries 18 are not already installed. 19 """ 20 from __future__ import print_function 21 22 import sys 23 24 if sys.version_info < (2, 6): 25 print('google-api-python-client requires python version >= 2.6.', 26 file=sys.stderr) 27 sys.exit(1) 28 if (3, 1) <= sys.version_info < (3, 3): 29 print('google-api-python-client requires python3 version >= 3.3.', 30 file=sys.stderr) 31 sys.exit(1) 32 33 from setuptools import setup 34 import pkg_resources 35 36 def _DetectBadness(): 37 import os 38 if 'SKIP_GOOGLEAPICLIENT_COMPAT_CHECK' in os.environ: 39 return 40 o2c_pkg = None 41 try: 42 o2c_pkg = pkg_resources.get_distribution('oauth2client') 43 except pkg_resources.DistributionNotFound: 44 pass 45 oauth2client = None 46 try: 47 import oauth2client 48 except ImportError: 49 pass 50 if o2c_pkg is None and oauth2client is not None: 51 raise RuntimeError( 52 'Previous version of google-api-python-client detected; due to a ' 53 'packaging issue, we cannot perform an in-place upgrade. Please remove ' 54 'the old version and re-install this package.' 
55 ) 56 57 _DetectBadness() 58 59 packages = [ 60 'apiclient', 61 'googleapiclient', 62 'googleapiclient/discovery_cache', 63 ] 64 65 install_requires = [ 66 'httplib2>=0.8,<1', 67 'oauth2client>=1.5.0,<4.0.0', 68 'six>=1.6.1,<2', 69 'uritemplate>=0.6,<1', 70 ] 71 72 if sys.version_info < (2, 7): 73 install_requires.append('argparse') 74 75 long_desc = """The Google API Client for Python is a client library for 76 accessing the Plus, Moderator, and many other Google APIs.""" 77 78 import googleapiclient 79 version = googleapiclient.__version__ 80 81 setup( 82 name="google-api-python-client", 83 version=version, 84 description="Google API Client Library for Python", 85 long_description=long_desc, 86 author="Google Inc.", 87 url="http://github.com/google/google-api-python-client/", 88 install_requires=install_requires, 89 packages=packages, 90 package_data={}, 91 license="Apache 2.0", 92 keywords="google api client", 93 classifiers=[ 94 'Programming Language :: Python :: 2', 95 'Programming Language :: Python :: 2.6', 96 'Programming Language :: Python :: 2.7', 97 'Programming Language :: Python :: 3', 98 'Programming Language :: Python :: 3.3', 99 'Programming Language :: Python :: 3.4', 100 'Development Status :: 5 - Production/Stable', 101 'Intended Audience :: Developers', 102 'License :: OSI Approved :: Apache Software License', 103 'Operating System :: OS Independent', 104 'Topic :: Internet :: WWW/HTTP', 105 ], 106 ) 107 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -66,7 +66,7 @@ 'httplib2>=0.8,<1', 'oauth2client>=1.5.0,<4.0.0', 'six>=1.6.1,<2', - 'uritemplate>=0.6,<1', + 'uritemplate>=3.0.0,<4', ] if sys.version_info < (2, 7):
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -66,7 +66,7 @@\n 'httplib2>=0.8,<1',\n 'oauth2client>=1.5.0,<4.0.0',\n 'six>=1.6.1,<2',\n- 'uritemplate>=0.6,<1',\n+ 'uritemplate>=3.0.0,<4',\n ]\n \n if sys.version_info < (2, 7):\n", "issue": "Switch from \"uritemplate\" to \"uritemplate.py\"\nThere are at least 2 URI template packages on the PyPI: [uritemplate](https://pypi.python.org/pypi/uritemplate/0.6) and [uritemplate.py](https://pypi.python.org/pypi/uritemplate.py/0.3.0). Both of these packages use `uritemplate` as the package name to be imported, which causes a conflict when both are installed (see https://github.com/sigmavirus24/uritemplate/issues/14).\n\nI propose that this project depend on `uritemplate.py` because:\n- `uritemplate.py` is API-compatible with `uritemplate`, but the reverse is not true. I have confirmed that this library works with uritemplate.py.\n- Other projects depend on `uritemplate.py`. For example, we are currently working on a project that depends on both `github3.py` (GitHub API client) and `google-api-python-client`. Installing both results in an immediate `ImportError` due to the `uritemplate` conflict.\n\nThis is a simple, low-risk change that would aid compatibility with other projects.\n\nThoughts?\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (2, 6):\n print('google-api-python-client requires python version >= 2.6.',\n file=sys.stderr)\n sys.exit(1)\nif (3, 1) <= sys.version_info < (3, 3):\n print('google-api-python-client requires python3 version >= 3.3.',\n file=sys.stderr)\n sys.exit(1)\n\nfrom setuptools import setup\nimport pkg_resources\n\ndef _DetectBadness():\n import os\n if 'SKIP_GOOGLEAPICLIENT_COMPAT_CHECK' in os.environ:\n return\n o2c_pkg = None\n try:\n o2c_pkg = pkg_resources.get_distribution('oauth2client')\n except pkg_resources.DistributionNotFound:\n pass\n oauth2client = None\n try:\n import oauth2client\n except ImportError:\n pass\n if o2c_pkg is None and oauth2client is not None:\n raise RuntimeError(\n 'Previous version of google-api-python-client detected; due to a '\n 'packaging issue, we cannot perform an in-place upgrade. 
Please remove '\n 'the old version and re-install this package.'\n )\n\n_DetectBadness()\n\npackages = [\n 'apiclient',\n 'googleapiclient',\n 'googleapiclient/discovery_cache',\n]\n\ninstall_requires = [\n 'httplib2>=0.8,<1',\n 'oauth2client>=1.5.0,<4.0.0',\n 'six>=1.6.1,<2',\n 'uritemplate>=0.6,<1',\n]\n\nif sys.version_info < (2, 7):\n install_requires.append('argparse')\n\nlong_desc = \"\"\"The Google API Client for Python is a client library for\naccessing the Plus, Moderator, and many other Google APIs.\"\"\"\n\nimport googleapiclient\nversion = googleapiclient.__version__\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=long_desc,\n author=\"Google Inc.\",\n url=\"http://github.com/google/google-api-python-client/\",\n install_requires=install_requires,\n packages=packages,\n package_data={},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (2, 6):\n print('google-api-python-client requires python version >= 2.6.',\n file=sys.stderr)\n sys.exit(1)\nif (3, 1) <= sys.version_info < (3, 3):\n print('google-api-python-client requires python3 version >= 3.3.',\n file=sys.stderr)\n sys.exit(1)\n\nfrom setuptools import setup\nimport pkg_resources\n\ndef _DetectBadness():\n import os\n if 'SKIP_GOOGLEAPICLIENT_COMPAT_CHECK' in os.environ:\n return\n o2c_pkg = None\n try:\n o2c_pkg = pkg_resources.get_distribution('oauth2client')\n except pkg_resources.DistributionNotFound:\n pass\n oauth2client = None\n try:\n import oauth2client\n except ImportError:\n pass\n if o2c_pkg is None and oauth2client is not None:\n raise RuntimeError(\n 'Previous version of google-api-python-client detected; due to a '\n 'packaging issue, we cannot perform an in-place upgrade. 
Please remove '\n 'the old version and re-install this package.'\n )\n\n_DetectBadness()\n\npackages = [\n 'apiclient',\n 'googleapiclient',\n 'googleapiclient/discovery_cache',\n]\n\ninstall_requires = [\n 'httplib2>=0.8,<1',\n 'oauth2client>=1.5.0,<4.0.0',\n 'six>=1.6.1,<2',\n 'uritemplate>=3.0.0,<4',\n]\n\nif sys.version_info < (2, 7):\n install_requires.append('argparse')\n\nlong_desc = \"\"\"The Google API Client for Python is a client library for\naccessing the Plus, Moderator, and many other Google APIs.\"\"\"\n\nimport googleapiclient\nversion = googleapiclient.__version__\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=long_desc,\n author=\"Google Inc.\",\n url=\"http://github.com/google/google-api-python-client/\",\n install_requires=install_requires,\n packages=packages,\n package_data={},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n)\n", "path": "setup.py"}]}
1520
115
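The google-api-python-client row settles the `uritemplate` vs `uritemplate.py` conflict by moving the requirement onto the 3.x line (`uritemplate>=3.0.0,<4`), where the two projects' APIs converged. To illustrate how such a range pin behaves, here is a small sketch using the third-party `packaging` library (that library choice is mine for illustration; it is not part of the original `setup.py`):

```python
# Sketch: how a range pin like the one in the patch behaves. Uses the
# third-party `packaging` library purely for illustration.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

pin = SpecifierSet(">=3.0.0,<4")          # the updated uritemplate requirement

for candidate in ["0.6", "3.0.0", "3.0.2", "4.0.0"]:
    accepted = Version(candidate) in pin
    print(f"uritemplate=={candidate}: {'accepted' if accepted else 'rejected'}")
```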
gh_patches_debug_21178
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-1983
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Distros are coupled with instrumentations As mentioned [here](https://github.com/open-telemetry/opentelemetry-python/discussions/2005#discussion-3489738). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import sys 16 from logging import getLogger 17 from os import environ, path 18 from os.path import abspath, dirname, pathsep 19 from re import sub 20 21 from pkg_resources import iter_entry_points 22 23 from opentelemetry.environment_variables import ( 24 OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, 25 ) 26 from opentelemetry.instrumentation.dependencies import ( 27 get_dist_dependency_conflicts, 28 ) 29 from opentelemetry.instrumentation.distro import BaseDistro, DefaultDistro 30 31 logger = getLogger(__file__) 32 33 34 def _load_distros() -> BaseDistro: 35 for entry_point in iter_entry_points("opentelemetry_distro"): 36 try: 37 distro = entry_point.load()() 38 if not isinstance(distro, BaseDistro): 39 logger.debug( 40 "%s is not an OpenTelemetry Distro. 
Skipping", 41 entry_point.name, 42 ) 43 continue 44 logger.debug( 45 "Distribution %s will be configured", entry_point.name 46 ) 47 return distro 48 except Exception as exc: # pylint: disable=broad-except 49 logger.exception( 50 "Distribution %s configuration failed", entry_point.name 51 ) 52 raise exc 53 return DefaultDistro() 54 55 56 def _load_instrumentors(distro): 57 package_to_exclude = environ.get(OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, []) 58 if isinstance(package_to_exclude, str): 59 package_to_exclude = package_to_exclude.split(",") 60 # to handle users entering "requests , flask" or "requests, flask" with spaces 61 package_to_exclude = [x.strip() for x in package_to_exclude] 62 63 for entry_point in iter_entry_points("opentelemetry_instrumentor"): 64 if entry_point.name in package_to_exclude: 65 logger.debug( 66 "Instrumentation skipped for library %s", entry_point.name 67 ) 68 continue 69 70 try: 71 conflict = get_dist_dependency_conflicts(entry_point.dist) 72 if conflict: 73 logger.debug( 74 "Skipping instrumentation %s: %s", 75 entry_point.name, 76 conflict, 77 ) 78 continue 79 80 # tell instrumentation to not run dep checks again as we already did it above 81 distro.load_instrumentor(entry_point, skip_dep_check=True) 82 logger.debug("Instrumented %s", entry_point.name) 83 except Exception as exc: # pylint: disable=broad-except 84 logger.exception("Instrumenting of %s failed", entry_point.name) 85 raise exc 86 87 88 def _load_configurators(): 89 configured = None 90 for entry_point in iter_entry_points("opentelemetry_configurator"): 91 if configured is not None: 92 logger.warning( 93 "Configuration of %s not loaded, %s already loaded", 94 entry_point.name, 95 configured, 96 ) 97 continue 98 try: 99 entry_point.load()().configure() # type: ignore 100 configured = entry_point.name 101 except Exception as exc: # pylint: disable=broad-except 102 logger.exception("Configuration of %s failed", entry_point.name) 103 raise exc 104 105 106 def initialize(): 107 try: 108 distro = _load_distros() 109 distro.configure() 110 _load_configurators() 111 _load_instrumentors(distro) 112 except Exception: # pylint: disable=broad-except 113 logger.exception("Failed to auto initialize opentelemetry") 114 finally: 115 environ["PYTHONPATH"] = sub( 116 r"{}{}?".format(dirname(abspath(__file__)), pathsep), 117 "", 118 environ["PYTHONPATH"], 119 ) 120 121 122 if ( 123 hasattr(sys, "argv") 124 and sys.argv[0].split(path.sep)[-1] == "celery" 125 and "worker" in sys.argv[1:] 126 ): 127 from celery.signals import worker_process_init # pylint:disable=E0401 128 129 @worker_process_init.connect(weak=False) 130 def init_celery(*args, **kwargs): 131 initialize() 132 133 134 else: 135 initialize() 136 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py
--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py
+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py
@@ -60,6 +60,9 @@
     # to handle users entering "requests , flask" or "requests, flask" with spaces
     package_to_exclude = [x.strip() for x in package_to_exclude]
 
+    for entry_point in iter_entry_points("opentelemetry_pre_instrument"):
+        entry_point.load()()
+
     for entry_point in iter_entry_points("opentelemetry_instrumentor"):
         if entry_point.name in package_to_exclude:
             logger.debug(
@@ -84,6 +87,9 @@
             logger.exception("Instrumenting of %s failed", entry_point.name)
             raise exc
 
+    for entry_point in iter_entry_points("opentelemetry_post_instrument"):
+        entry_point.load()()
+
 
 def _load_configurators():
     configured = None
{"golden_diff": "diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\n@@ -60,6 +60,9 @@\n # to handle users entering \"requests , flask\" or \"requests, flask\" with spaces\n package_to_exclude = [x.strip() for x in package_to_exclude]\n \n+ for entry_point in iter_entry_points(\"opentelemetry_pre_instrument\"):\n+ entry_point.load()()\n+\n for entry_point in iter_entry_points(\"opentelemetry_instrumentor\"):\n if entry_point.name in package_to_exclude:\n logger.debug(\n@@ -84,6 +87,9 @@\n logger.exception(\"Instrumenting of %s failed\", entry_point.name)\n raise exc\n \n+ for entry_point in iter_entry_points(\"opentelemetry_post_instrument\"):\n+ entry_point.load()()\n+\n \n def _load_configurators():\n configured = None\n", "issue": "Distros are coupled with instrumentations\nAs mentioned [here](https://github.com/open-telemetry/opentelemetry-python/discussions/2005#discussion-3489738).\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom logging import getLogger\nfrom os import environ, path\nfrom os.path import abspath, dirname, pathsep\nfrom re import sub\n\nfrom pkg_resources import iter_entry_points\n\nfrom opentelemetry.environment_variables import (\n OTEL_PYTHON_DISABLED_INSTRUMENTATIONS,\n)\nfrom opentelemetry.instrumentation.dependencies import (\n get_dist_dependency_conflicts,\n)\nfrom opentelemetry.instrumentation.distro import BaseDistro, DefaultDistro\n\nlogger = getLogger(__file__)\n\n\ndef _load_distros() -> BaseDistro:\n for entry_point in iter_entry_points(\"opentelemetry_distro\"):\n try:\n distro = entry_point.load()()\n if not isinstance(distro, BaseDistro):\n logger.debug(\n \"%s is not an OpenTelemetry Distro. 
Skipping\",\n entry_point.name,\n )\n continue\n logger.debug(\n \"Distribution %s will be configured\", entry_point.name\n )\n return distro\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\n \"Distribution %s configuration failed\", entry_point.name\n )\n raise exc\n return DefaultDistro()\n\n\ndef _load_instrumentors(distro):\n package_to_exclude = environ.get(OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, [])\n if isinstance(package_to_exclude, str):\n package_to_exclude = package_to_exclude.split(\",\")\n # to handle users entering \"requests , flask\" or \"requests, flask\" with spaces\n package_to_exclude = [x.strip() for x in package_to_exclude]\n\n for entry_point in iter_entry_points(\"opentelemetry_instrumentor\"):\n if entry_point.name in package_to_exclude:\n logger.debug(\n \"Instrumentation skipped for library %s\", entry_point.name\n )\n continue\n\n try:\n conflict = get_dist_dependency_conflicts(entry_point.dist)\n if conflict:\n logger.debug(\n \"Skipping instrumentation %s: %s\",\n entry_point.name,\n conflict,\n )\n continue\n\n # tell instrumentation to not run dep checks again as we already did it above\n distro.load_instrumentor(entry_point, skip_dep_check=True)\n logger.debug(\"Instrumented %s\", entry_point.name)\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\"Instrumenting of %s failed\", entry_point.name)\n raise exc\n\n\ndef _load_configurators():\n configured = None\n for entry_point in iter_entry_points(\"opentelemetry_configurator\"):\n if configured is not None:\n logger.warning(\n \"Configuration of %s not loaded, %s already loaded\",\n entry_point.name,\n configured,\n )\n continue\n try:\n entry_point.load()().configure() # type: ignore\n configured = entry_point.name\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\"Configuration of %s failed\", entry_point.name)\n raise exc\n\n\ndef initialize():\n try:\n distro = _load_distros()\n distro.configure()\n _load_configurators()\n _load_instrumentors(distro)\n except Exception: # pylint: disable=broad-except\n logger.exception(\"Failed to auto initialize opentelemetry\")\n finally:\n environ[\"PYTHONPATH\"] = sub(\n r\"{}{}?\".format(dirname(abspath(__file__)), pathsep),\n \"\",\n environ[\"PYTHONPATH\"],\n )\n\n\nif (\n hasattr(sys, \"argv\")\n and sys.argv[0].split(path.sep)[-1] == \"celery\"\n and \"worker\" in sys.argv[1:]\n):\n from celery.signals import worker_process_init # pylint:disable=E0401\n\n @worker_process_init.connect(weak=False)\n def init_celery(*args, **kwargs):\n initialize()\n\n\nelse:\n initialize()\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom logging import getLogger\nfrom os import environ, path\nfrom os.path import abspath, dirname, pathsep\nfrom re import sub\n\nfrom pkg_resources import 
iter_entry_points\n\nfrom opentelemetry.environment_variables import (\n OTEL_PYTHON_DISABLED_INSTRUMENTATIONS,\n)\nfrom opentelemetry.instrumentation.dependencies import (\n get_dist_dependency_conflicts,\n)\nfrom opentelemetry.instrumentation.distro import BaseDistro, DefaultDistro\n\nlogger = getLogger(__file__)\n\n\ndef _load_distros() -> BaseDistro:\n for entry_point in iter_entry_points(\"opentelemetry_distro\"):\n try:\n distro = entry_point.load()()\n if not isinstance(distro, BaseDistro):\n logger.debug(\n \"%s is not an OpenTelemetry Distro. Skipping\",\n entry_point.name,\n )\n continue\n logger.debug(\n \"Distribution %s will be configured\", entry_point.name\n )\n return distro\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\n \"Distribution %s configuration failed\", entry_point.name\n )\n raise exc\n return DefaultDistro()\n\n\ndef _load_instrumentors(distro):\n package_to_exclude = environ.get(OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, [])\n if isinstance(package_to_exclude, str):\n package_to_exclude = package_to_exclude.split(\",\")\n # to handle users entering \"requests , flask\" or \"requests, flask\" with spaces\n package_to_exclude = [x.strip() for x in package_to_exclude]\n\n for entry_point in iter_entry_points(\"opentelemetry_pre_instrument\"):\n entry_point.load()()\n\n for entry_point in iter_entry_points(\"opentelemetry_instrumentor\"):\n if entry_point.name in package_to_exclude:\n logger.debug(\n \"Instrumentation skipped for library %s\", entry_point.name\n )\n continue\n\n try:\n conflict = get_dist_dependency_conflicts(entry_point.dist)\n if conflict:\n logger.debug(\n \"Skipping instrumentation %s: %s\",\n entry_point.name,\n conflict,\n )\n continue\n\n # tell instrumentation to not run dep checks again as we already did it above\n distro.load_instrumentor(entry_point, skip_dep_check=True)\n logger.debug(\"Instrumented %s\", entry_point.name)\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\"Instrumenting of %s failed\", entry_point.name)\n raise exc\n\n for entry_point in iter_entry_points(\"opentelemetry_post_instrument\"):\n entry_point.load()()\n\n\ndef _load_configurators():\n configured = None\n for entry_point in iter_entry_points(\"opentelemetry_configurator\"):\n if configured is not None:\n logger.warning(\n \"Configuration of %s not loaded, %s already loaded\",\n entry_point.name,\n configured,\n )\n continue\n try:\n entry_point.load()().configure() # type: ignore\n configured = entry_point.name\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\"Configuration of %s failed\", entry_point.name)\n raise exc\n\n\ndef initialize():\n try:\n distro = _load_distros()\n distro.configure()\n _load_configurators()\n _load_instrumentors(distro)\n except Exception: # pylint: disable=broad-except\n logger.exception(\"Failed to auto initialize opentelemetry\")\n finally:\n environ[\"PYTHONPATH\"] = sub(\n r\"{}{}?\".format(dirname(abspath(__file__)), pathsep),\n \"\",\n environ[\"PYTHONPATH\"],\n )\n\n\nif (\n hasattr(sys, \"argv\")\n and sys.argv[0].split(path.sep)[-1] == \"celery\"\n and \"worker\" in sys.argv[1:]\n):\n from celery.signals import worker_process_init # pylint:disable=E0401\n\n @worker_process_init.connect(weak=False)\n def init_celery(*args, **kwargs):\n initialize()\n\n\nelse:\n initialize()\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py"}]}
1579
265
gh_patches_debug_36968
rasdani/github-patches
git_diff
ESMCI__cime-1436
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add --component option to preview_namelists It would be useful if users could specify a single component when running `preview_namelists` rather than building namelists for all components when the script is run from the command line in a case directory. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/lib/CIME/preview_namelists.py` Content: ``` 1 """ 2 API for preview namelist 3 """ 4 5 from CIME.XML.standard_module_setup import * 6 7 import glob, shutil, imp 8 logger = logging.getLogger(__name__) 9 10 def create_dirs(case): 11 """ 12 Make necessary directories for case 13 """ 14 # Get data from XML 15 exeroot = case.get_value("EXEROOT") 16 libroot = case.get_value("LIBROOT") 17 incroot = case.get_value("INCROOT") 18 rundir = case.get_value("RUNDIR") 19 caseroot = case.get_value("CASEROOT") 20 21 docdir = os.path.join(caseroot, "CaseDocs") 22 dirs_to_make = [] 23 models = case.get_values("COMP_CLASSES") 24 for model in models: 25 dirname = model.lower() 26 dirs_to_make.append(os.path.join(exeroot, dirname, "obj")) 27 28 dirs_to_make.extend([exeroot, libroot, incroot, rundir, docdir]) 29 30 for dir_to_make in dirs_to_make: 31 if (not os.path.isdir(dir_to_make)): 32 try: 33 logger.debug("Making dir '%s'" % dir_to_make) 34 os.makedirs(dir_to_make) 35 except OSError as e: 36 expect(False, "Could not make directory '%s', error: %s" % (dir_to_make, e)) 37 38 # As a convenience write the location of the case directory in the bld and run directories 39 for dir_ in (exeroot, rundir): 40 with open(os.path.join(dir_,"CASEROOT"),"w+") as fd: 41 fd.write(caseroot+"\n") 42 43 def create_namelists(case): 44 """ 45 Create component namelists 46 """ 47 case.flush() 48 49 create_dirs(case) 50 51 casebuild = case.get_value("CASEBUILD") 52 caseroot = case.get_value("CASEROOT") 53 rundir = case.get_value("RUNDIR") 54 55 docdir = os.path.join(caseroot, "CaseDocs") 56 57 # Load modules 58 case.load_env() 59 60 logger.info("Creating component namelists") 61 62 # Create namelists - must have cpl last in the list below 63 # Note - cpl must be last in the loop below so that in generating its namelist, 64 # it can use xml vars potentially set by other component's buildnml scripts 65 models = case.get_values("COMP_CLASSES") 66 models += [models.pop(0)] 67 for model in models: 68 model_str = model.lower() 69 config_file = case.get_value("CONFIG_%s_FILE" % model_str.upper()) 70 config_dir = os.path.dirname(config_file) 71 if model_str == "cpl": 72 compname = "drv" 73 else: 74 compname = case.get_value("COMP_%s" % model_str.upper()) 75 76 cmd = os.path.join(config_dir, "buildnml") 77 do_run_cmd = False 78 # This code will try to import and run each buildnml as a subroutine 79 # if that fails it will run it as a program in a seperate shell 80 try: 81 with open(cmd, 'r') as f: 82 first_line = f.readline() 83 if "python" in first_line: 84 mod = imp.load_source("buildnml", cmd) 85 logger.info(" Calling %s buildnml"%compname) 86 mod.buildnml(case, caseroot, compname) 87 else: 88 raise SyntaxError 89 except SyntaxError as detail: 90 if 'python' in first_line: 91 expect(False, detail) 92 else: 93 do_run_cmd = True 94 except AttributeError: 95 do_run_cmd = True 96 except: 97 raise 98 99 if do_run_cmd: 100 logger.info(" Running %s buildnml"%compname) 101 case.flush() 102 output = run_cmd_no_fail("%s %s" % (cmd, 
caseroot), verbose=False) 103 logger.info(output) 104 # refresh case xml object from file 105 case.read_xml() 106 107 logger.info("Finished creating component namelists") 108 109 # Save namelists to docdir 110 if (not os.path.isdir(docdir)): 111 os.makedirs(docdir) 112 try: 113 with open(os.path.join(docdir, "README"), "w") as fd: 114 fd.write(" CESM Resolved Namelist Files\n For documentation only DO NOT MODIFY\n") 115 except (OSError, IOError) as e: 116 expect(False, "Failed to write %s/README: %s" % (docdir, e)) 117 118 for cpglob in ["*_in_[0-9]*", "*modelio*", "*_in", 119 "*streams*txt*", "*stxt", "*maps.rc", "*cism.config*"]: 120 for file_to_copy in glob.glob(os.path.join(rundir, cpglob)): 121 logger.debug("Copy file from '%s' to '%s'" % (file_to_copy, docdir)) 122 shutil.copy2(file_to_copy, docdir) 123 124 # Copy over chemistry mechanism docs if they exist 125 if (os.path.isdir(os.path.join(casebuild, "camconf"))): 126 for file_to_copy in glob.glob(os.path.join(casebuild, "camconf", "*chem_mech*")): 127 shutil.copy2(file_to_copy, docdir) 128 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/lib/CIME/preview_namelists.py b/scripts/lib/CIME/preview_namelists.py
--- a/scripts/lib/CIME/preview_namelists.py
+++ b/scripts/lib/CIME/preview_namelists.py
@@ -40,7 +40,7 @@
         with open(os.path.join(dir_,"CASEROOT"),"w+") as fd:
             fd.write(caseroot+"\n")
 
-def create_namelists(case):
+def create_namelists(case, component=None):
     """
     Create component namelists
     """
@@ -73,36 +73,37 @@
         else:
             compname = case.get_value("COMP_%s" % model_str.upper())
 
-        cmd = os.path.join(config_dir, "buildnml")
-        do_run_cmd = False
-        # This code will try to import and run each buildnml as a subroutine
-        # if that fails it will run it as a program in a seperate shell
-        try:
-            with open(cmd, 'r') as f:
-                first_line = f.readline()
-            if "python" in first_line:
-                mod = imp.load_source("buildnml", cmd)
-                logger.info(" Calling %s buildnml"%compname)
-                mod.buildnml(case, caseroot, compname)
-            else:
-                raise SyntaxError
-        except SyntaxError as detail:
-            if 'python' in first_line:
-                expect(False, detail)
-            else:
+        if component is None or component == model_str:
+            cmd = os.path.join(config_dir, "buildnml")
+            do_run_cmd = False
+            # This code will try to import and run each buildnml as a subroutine
+            # if that fails it will run it as a program in a seperate shell
+            try:
+                with open(cmd, 'r') as f:
+                    first_line = f.readline()
+                if "python" in first_line:
+                    mod = imp.load_source("buildnml", cmd)
+                    logger.info(" Calling %s buildnml"%compname)
+                    mod.buildnml(case, caseroot, compname)
+                else:
+                    raise SyntaxError
+            except SyntaxError as detail:
+                if 'python' in first_line:
+                    expect(False, detail)
+                else:
+                    do_run_cmd = True
+            except AttributeError:
                 do_run_cmd = True
-        except AttributeError:
-            do_run_cmd = True
-        except:
-            raise
-
-        if do_run_cmd:
-            logger.info(" Running %s buildnml"%compname)
-            case.flush()
-            output = run_cmd_no_fail("%s %s" % (cmd, caseroot), verbose=False)
-            logger.info(output)
-            # refresh case xml object from file
-            case.read_xml()
+            except:
+                raise
+
+            if do_run_cmd:
+                logger.info(" Running %s buildnml"%compname)
+                case.flush()
+                output = run_cmd_no_fail("%s %s" % (cmd, caseroot), verbose=False)
+                logger.info(output)
+                # refresh case xml object from file
+                case.read_xml()
 
     logger.info("Finished creating component namelists")
{"golden_diff": "diff --git a/scripts/lib/CIME/preview_namelists.py b/scripts/lib/CIME/preview_namelists.py\n--- a/scripts/lib/CIME/preview_namelists.py\n+++ b/scripts/lib/CIME/preview_namelists.py\n@@ -40,7 +40,7 @@\n with open(os.path.join(dir_,\"CASEROOT\"),\"w+\") as fd:\n fd.write(caseroot+\"\\n\")\n \n-def create_namelists(case):\n+def create_namelists(case, component=None):\n \"\"\"\n Create component namelists\n \"\"\"\n@@ -73,36 +73,37 @@\n else:\n compname = case.get_value(\"COMP_%s\" % model_str.upper())\n \n- cmd = os.path.join(config_dir, \"buildnml\")\n- do_run_cmd = False\n- # This code will try to import and run each buildnml as a subroutine\n- # if that fails it will run it as a program in a seperate shell\n- try:\n- with open(cmd, 'r') as f:\n- first_line = f.readline()\n- if \"python\" in first_line:\n- mod = imp.load_source(\"buildnml\", cmd)\n- logger.info(\" Calling %s buildnml\"%compname)\n- mod.buildnml(case, caseroot, compname)\n- else:\n- raise SyntaxError\n- except SyntaxError as detail:\n- if 'python' in first_line:\n- expect(False, detail)\n- else:\n+ if component is None or component == model_str:\n+ cmd = os.path.join(config_dir, \"buildnml\")\n+ do_run_cmd = False\n+ # This code will try to import and run each buildnml as a subroutine\n+ # if that fails it will run it as a program in a seperate shell\n+ try:\n+ with open(cmd, 'r') as f:\n+ first_line = f.readline()\n+ if \"python\" in first_line:\n+ mod = imp.load_source(\"buildnml\", cmd)\n+ logger.info(\" Calling %s buildnml\"%compname)\n+ mod.buildnml(case, caseroot, compname)\n+ else:\n+ raise SyntaxError\n+ except SyntaxError as detail:\n+ if 'python' in first_line:\n+ expect(False, detail)\n+ else:\n+ do_run_cmd = True\n+ except AttributeError:\n do_run_cmd = True\n- except AttributeError:\n- do_run_cmd = True\n- except:\n- raise\n-\n- if do_run_cmd:\n- logger.info(\" Running %s buildnml\"%compname)\n- case.flush()\n- output = run_cmd_no_fail(\"%s %s\" % (cmd, caseroot), verbose=False)\n- logger.info(output)\n- # refresh case xml object from file\n- case.read_xml()\n+ except:\n+ raise\n+\n+ if do_run_cmd:\n+ logger.info(\" Running %s buildnml\"%compname)\n+ case.flush()\n+ output = run_cmd_no_fail(\"%s %s\" % (cmd, caseroot), verbose=False)\n+ logger.info(output)\n+ # refresh case xml object from file\n+ case.read_xml()\n \n logger.info(\"Finished creating component namelists\")\n", "issue": "Add --component option to preview_namelists\nIt would be useful if users could specify a single component when running `preview_namelists` rather than building namelists for all components when the script is run from the command line in a case directory.\n", "before_files": [{"content": "\"\"\"\nAPI for preview namelist\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nimport glob, shutil, imp\nlogger = logging.getLogger(__name__)\n\ndef create_dirs(case):\n \"\"\"\n Make necessary directories for case\n \"\"\"\n # Get data from XML\n exeroot = case.get_value(\"EXEROOT\")\n libroot = case.get_value(\"LIBROOT\")\n incroot = case.get_value(\"INCROOT\")\n rundir = case.get_value(\"RUNDIR\")\n caseroot = case.get_value(\"CASEROOT\")\n\n docdir = os.path.join(caseroot, \"CaseDocs\")\n dirs_to_make = []\n models = case.get_values(\"COMP_CLASSES\")\n for model in models:\n dirname = model.lower()\n dirs_to_make.append(os.path.join(exeroot, dirname, \"obj\"))\n\n dirs_to_make.extend([exeroot, libroot, incroot, rundir, docdir])\n\n for dir_to_make in dirs_to_make:\n if (not os.path.isdir(dir_to_make)):\n try:\n 
logger.debug(\"Making dir '%s'\" % dir_to_make)\n os.makedirs(dir_to_make)\n except OSError as e:\n expect(False, \"Could not make directory '%s', error: %s\" % (dir_to_make, e))\n\n # As a convenience write the location of the case directory in the bld and run directories\n for dir_ in (exeroot, rundir):\n with open(os.path.join(dir_,\"CASEROOT\"),\"w+\") as fd:\n fd.write(caseroot+\"\\n\")\n\ndef create_namelists(case):\n \"\"\"\n Create component namelists\n \"\"\"\n case.flush()\n\n create_dirs(case)\n\n casebuild = case.get_value(\"CASEBUILD\")\n caseroot = case.get_value(\"CASEROOT\")\n rundir = case.get_value(\"RUNDIR\")\n\n docdir = os.path.join(caseroot, \"CaseDocs\")\n\n # Load modules\n case.load_env()\n\n logger.info(\"Creating component namelists\")\n\n # Create namelists - must have cpl last in the list below\n # Note - cpl must be last in the loop below so that in generating its namelist,\n # it can use xml vars potentially set by other component's buildnml scripts\n models = case.get_values(\"COMP_CLASSES\")\n models += [models.pop(0)]\n for model in models:\n model_str = model.lower()\n config_file = case.get_value(\"CONFIG_%s_FILE\" % model_str.upper())\n config_dir = os.path.dirname(config_file)\n if model_str == \"cpl\":\n compname = \"drv\"\n else:\n compname = case.get_value(\"COMP_%s\" % model_str.upper())\n\n cmd = os.path.join(config_dir, \"buildnml\")\n do_run_cmd = False\n # This code will try to import and run each buildnml as a subroutine\n # if that fails it will run it as a program in a seperate shell\n try:\n with open(cmd, 'r') as f:\n first_line = f.readline()\n if \"python\" in first_line:\n mod = imp.load_source(\"buildnml\", cmd)\n logger.info(\" Calling %s buildnml\"%compname)\n mod.buildnml(case, caseroot, compname)\n else:\n raise SyntaxError\n except SyntaxError as detail:\n if 'python' in first_line:\n expect(False, detail)\n else:\n do_run_cmd = True\n except AttributeError:\n do_run_cmd = True\n except:\n raise\n\n if do_run_cmd:\n logger.info(\" Running %s buildnml\"%compname)\n case.flush()\n output = run_cmd_no_fail(\"%s %s\" % (cmd, caseroot), verbose=False)\n logger.info(output)\n # refresh case xml object from file\n case.read_xml()\n\n logger.info(\"Finished creating component namelists\")\n\n # Save namelists to docdir\n if (not os.path.isdir(docdir)):\n os.makedirs(docdir)\n try:\n with open(os.path.join(docdir, \"README\"), \"w\") as fd:\n fd.write(\" CESM Resolved Namelist Files\\n For documentation only DO NOT MODIFY\\n\")\n except (OSError, IOError) as e:\n expect(False, \"Failed to write %s/README: %s\" % (docdir, e))\n\n for cpglob in [\"*_in_[0-9]*\", \"*modelio*\", \"*_in\",\n \"*streams*txt*\", \"*stxt\", \"*maps.rc\", \"*cism.config*\"]:\n for file_to_copy in glob.glob(os.path.join(rundir, cpglob)):\n logger.debug(\"Copy file from '%s' to '%s'\" % (file_to_copy, docdir))\n shutil.copy2(file_to_copy, docdir)\n\n # Copy over chemistry mechanism docs if they exist\n if (os.path.isdir(os.path.join(casebuild, \"camconf\"))):\n for file_to_copy in glob.glob(os.path.join(casebuild, \"camconf\", \"*chem_mech*\")):\n shutil.copy2(file_to_copy, docdir)\n", "path": "scripts/lib/CIME/preview_namelists.py"}], "after_files": [{"content": "\"\"\"\nAPI for preview namelist\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nimport glob, shutil, imp\nlogger = logging.getLogger(__name__)\n\ndef create_dirs(case):\n \"\"\"\n Make necessary directories for case\n \"\"\"\n # Get data from XML\n exeroot = case.get_value(\"EXEROOT\")\n libroot 
= case.get_value(\"LIBROOT\")\n incroot = case.get_value(\"INCROOT\")\n rundir = case.get_value(\"RUNDIR\")\n caseroot = case.get_value(\"CASEROOT\")\n\n docdir = os.path.join(caseroot, \"CaseDocs\")\n dirs_to_make = []\n models = case.get_values(\"COMP_CLASSES\")\n for model in models:\n dirname = model.lower()\n dirs_to_make.append(os.path.join(exeroot, dirname, \"obj\"))\n\n dirs_to_make.extend([exeroot, libroot, incroot, rundir, docdir])\n\n for dir_to_make in dirs_to_make:\n if (not os.path.isdir(dir_to_make)):\n try:\n logger.debug(\"Making dir '%s'\" % dir_to_make)\n os.makedirs(dir_to_make)\n except OSError as e:\n expect(False, \"Could not make directory '%s', error: %s\" % (dir_to_make, e))\n\n # As a convenience write the location of the case directory in the bld and run directories\n for dir_ in (exeroot, rundir):\n with open(os.path.join(dir_,\"CASEROOT\"),\"w+\") as fd:\n fd.write(caseroot+\"\\n\")\n\ndef create_namelists(case, component=None):\n \"\"\"\n Create component namelists\n \"\"\"\n case.flush()\n\n create_dirs(case)\n\n casebuild = case.get_value(\"CASEBUILD\")\n caseroot = case.get_value(\"CASEROOT\")\n rundir = case.get_value(\"RUNDIR\")\n\n docdir = os.path.join(caseroot, \"CaseDocs\")\n\n # Load modules\n case.load_env()\n\n logger.info(\"Creating component namelists\")\n\n # Create namelists - must have cpl last in the list below\n # Note - cpl must be last in the loop below so that in generating its namelist,\n # it can use xml vars potentially set by other component's buildnml scripts\n models = case.get_values(\"COMP_CLASSES\")\n models += [models.pop(0)]\n for model in models:\n model_str = model.lower()\n config_file = case.get_value(\"CONFIG_%s_FILE\" % model_str.upper())\n config_dir = os.path.dirname(config_file)\n if model_str == \"cpl\":\n compname = \"drv\"\n else:\n compname = case.get_value(\"COMP_%s\" % model_str.upper())\n\n if component is None or component == model_str:\n cmd = os.path.join(config_dir, \"buildnml\")\n do_run_cmd = False\n # This code will try to import and run each buildnml as a subroutine\n # if that fails it will run it as a program in a seperate shell\n try:\n with open(cmd, 'r') as f:\n first_line = f.readline()\n if \"python\" in first_line:\n mod = imp.load_source(\"buildnml\", cmd)\n logger.info(\" Calling %s buildnml\"%compname)\n mod.buildnml(case, caseroot, compname)\n else:\n raise SyntaxError\n except SyntaxError as detail:\n if 'python' in first_line:\n expect(False, detail)\n else:\n do_run_cmd = True\n except AttributeError:\n do_run_cmd = True\n except:\n raise\n\n if do_run_cmd:\n logger.info(\" Running %s buildnml\"%compname)\n case.flush()\n output = run_cmd_no_fail(\"%s %s\" % (cmd, caseroot), verbose=False)\n logger.info(output)\n # refresh case xml object from file\n case.read_xml()\n\n logger.info(\"Finished creating component namelists\")\n\n # Save namelists to docdir\n if (not os.path.isdir(docdir)):\n os.makedirs(docdir)\n try:\n with open(os.path.join(docdir, \"README\"), \"w\") as fd:\n fd.write(\" CESM Resolved Namelist Files\\n For documentation only DO NOT MODIFY\\n\")\n except (OSError, IOError) as e:\n expect(False, \"Failed to write %s/README: %s\" % (docdir, e))\n\n for cpglob in [\"*_in_[0-9]*\", \"*modelio*\", \"*_in\",\n \"*streams*txt*\", \"*stxt\", \"*maps.rc\", \"*cism.config*\"]:\n for file_to_copy in glob.glob(os.path.join(rundir, cpglob)):\n logger.debug(\"Copy file from '%s' to '%s'\" % (file_to_copy, docdir))\n shutil.copy2(file_to_copy, docdir)\n\n # Copy over chemistry mechanism 
docs if they exist\n if (os.path.isdir(os.path.join(casebuild, \"camconf\"))):\n for file_to_copy in glob.glob(os.path.join(casebuild, \"camconf\", \"*chem_mech*\")):\n shutil.copy2(file_to_copy, docdir)\n", "path": "scripts/lib/CIME/preview_namelists.py"}]}
1721
735
gh_patches_debug_1633
rasdani/github-patches
git_diff
microsoft__botbuilder-python-2050
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- botbuidler support for regex== 2022 and above Description: I'm currently working on building a chatbot using Azure Bot Builder SDK in conjunction with OpenAI. In my project, I'm relying on the OpenAIEmbedding class from the langchain package, which utilizes Tiktoken. However, I've run into an issue due to dependency conflicts with Tiktoken. Specifically, Tiktoken requires regex version 2022 or higher, while the Bot Builder package supports only up to regex version 2019. Feature Request: I kindly request adding support for Tiktoken's regex version 2022 or higher in the OpenAIEmbedding class within the langchain package. This update would resolve the dependency conflicts and enable smoother integration of OpenAI into projects using Azure Bot Builder SDK. Additional Information: Current Behavior: Currently, the OpenAIEmbedding class in langchain relies on Tiktoken, which necessitates a regex version that is not compatible with the Bot Builder SDK's regex version support. Desired Behavior: The botbuilder classes should be updated to support Tiktoken's dependency on regex version 2022 or higher t Impact of the Feature: This feature would benefit developers working on chatbot projects that use Azure Bot Builder SDK and OpenAI. It would eliminate dependency conflicts, allowing for a seamless integration experience. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libraries/botbuilder-dialogs/setup.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 import os 5 from setuptools import setup 6 7 REQUIRES = [ 8 "regex<=2019.08.19", 9 "emoji==1.7.0", 10 "recognizers-text-date-time>=1.0.2a1", 11 "recognizers-text-number-with-unit>=1.0.2a1", 12 "recognizers-text-number>=1.0.2a1", 13 "recognizers-text>=1.0.2a1", 14 "recognizers-text-choice>=1.0.2a1", 15 "babel==2.9.1", 16 "botbuilder-schema==4.15.0", 17 "botframework-connector==4.15.0", 18 "botbuilder-core==4.15.0", 19 ] 20 21 TEST_REQUIRES = ["aiounittest==1.3.0"] 22 23 root = os.path.abspath(os.path.dirname(__file__)) 24 25 with open(os.path.join(root, "botbuilder", "dialogs", "about.py")) as f: 26 package_info = {} 27 info = f.read() 28 exec(info, package_info) 29 30 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f: 31 long_description = f.read() 32 33 setup( 34 name=package_info["__title__"], 35 version=package_info["__version__"], 36 url=package_info["__uri__"], 37 author=package_info["__author__"], 38 description=package_info["__description__"], 39 keywords=["BotBuilderDialogs", "bots", "ai", "botframework", "botbuilder"], 40 long_description=long_description, 41 long_description_content_type="text/x-rst", 42 license=package_info["__license__"], 43 packages=[ 44 "botbuilder.dialogs", 45 "botbuilder.dialogs.prompts", 46 "botbuilder.dialogs.choices", 47 "botbuilder.dialogs.skills", 48 "botbuilder.dialogs.memory", 49 "botbuilder.dialogs.memory.path_resolvers", 50 "botbuilder.dialogs.memory.scopes", 51 ], 52 install_requires=REQUIRES + TEST_REQUIRES, 53 tests_require=TEST_REQUIRES, 54 include_package_data=True, 55 classifiers=[ 56 "Programming Language :: Python :: 3.7", 57 "Intended Audience :: Developers", 58 "License :: OSI Approved :: MIT License", 59 "Operating System :: OS Independent", 60 "Development Status :: 5 - Production/Stable", 61 "Topic :: 
Scientific/Engineering :: Artificial Intelligence", 62 ], 63 ) 64 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libraries/botbuilder-dialogs/setup.py b/libraries/botbuilder-dialogs/setup.py
--- a/libraries/botbuilder-dialogs/setup.py
+++ b/libraries/botbuilder-dialogs/setup.py
@@ -5,7 +5,7 @@
 from setuptools import setup
 
 REQUIRES = [
-    "regex<=2019.08.19",
+    "regex>=2022.1.18",
     "emoji==1.7.0",
     "recognizers-text-date-time>=1.0.2a1",
     "recognizers-text-number-with-unit>=1.0.2a1",
{"golden_diff": "diff --git a/libraries/botbuilder-dialogs/setup.py b/libraries/botbuilder-dialogs/setup.py\n--- a/libraries/botbuilder-dialogs/setup.py\n+++ b/libraries/botbuilder-dialogs/setup.py\n@@ -5,7 +5,7 @@\n from setuptools import setup\n \n REQUIRES = [\n- \"regex<=2019.08.19\",\n+ \"regex>=2022.1.18\",\n \"emoji==1.7.0\",\n \"recognizers-text-date-time>=1.0.2a1\",\n \"recognizers-text-number-with-unit>=1.0.2a1\",\n", "issue": "botbuidler support for regex== 2022 and above\nDescription:\r\n\r\nI'm currently working on building a chatbot using Azure Bot Builder SDK in conjunction with OpenAI. In my project, I'm relying on the OpenAIEmbedding class from the langchain package, which utilizes Tiktoken. However, I've run into an issue due to dependency conflicts with Tiktoken. Specifically, Tiktoken requires regex version 2022 or higher, while the Bot Builder package supports only up to regex version 2019.\r\n\r\nFeature Request:\r\n\r\nI kindly request adding support for Tiktoken's regex version 2022 or higher in the OpenAIEmbedding class within the langchain package. This update would resolve the dependency conflicts and enable smoother integration of OpenAI into projects using Azure Bot Builder SDK.\r\n\r\nAdditional Information:\r\n\r\nCurrent Behavior: Currently, the OpenAIEmbedding class in langchain relies on Tiktoken, which necessitates a regex version that is not compatible with the Bot Builder SDK's regex version support.\r\n\r\nDesired Behavior: The botbuilder classes should be updated to support Tiktoken's dependency on regex version 2022 or higher t\r\n\r\nImpact of the Feature:\r\n\r\nThis feature would benefit developers working on chatbot projects that use Azure Bot Builder SDK and OpenAI. It would eliminate dependency conflicts, allowing for a seamless integration experience.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"regex<=2019.08.19\",\n \"emoji==1.7.0\",\n \"recognizers-text-date-time>=1.0.2a1\",\n \"recognizers-text-number-with-unit>=1.0.2a1\",\n \"recognizers-text-number>=1.0.2a1\",\n \"recognizers-text>=1.0.2a1\",\n \"recognizers-text-choice>=1.0.2a1\",\n \"babel==2.9.1\",\n \"botbuilder-schema==4.15.0\",\n \"botframework-connector==4.15.0\",\n \"botbuilder-core==4.15.0\",\n]\n\nTEST_REQUIRES = [\"aiounittest==1.3.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"dialogs\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\"BotBuilderDialogs\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.dialogs\",\n \"botbuilder.dialogs.prompts\",\n \"botbuilder.dialogs.choices\",\n \"botbuilder.dialogs.skills\",\n \"botbuilder.dialogs.memory\",\n \"botbuilder.dialogs.memory.path_resolvers\",\n \"botbuilder.dialogs.memory.scopes\",\n ],\n install_requires=REQUIRES + TEST_REQUIRES,\n tests_require=TEST_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-dialogs/setup.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"regex>=2022.1.18\",\n \"emoji==1.7.0\",\n \"recognizers-text-date-time>=1.0.2a1\",\n \"recognizers-text-number-with-unit>=1.0.2a1\",\n \"recognizers-text-number>=1.0.2a1\",\n \"recognizers-text>=1.0.2a1\",\n \"recognizers-text-choice>=1.0.2a1\",\n \"babel==2.9.1\",\n \"botbuilder-schema==4.15.0\",\n \"botframework-connector==4.15.0\",\n \"botbuilder-core==4.15.0\",\n]\n\nTEST_REQUIRES = [\"aiounittest==1.3.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"dialogs\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\"BotBuilderDialogs\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.dialogs\",\n \"botbuilder.dialogs.prompts\",\n \"botbuilder.dialogs.choices\",\n \"botbuilder.dialogs.skills\",\n \"botbuilder.dialogs.memory\",\n \"botbuilder.dialogs.memory.path_resolvers\",\n \"botbuilder.dialogs.memory.scopes\",\n ],\n install_requires=REQUIRES + TEST_REQUIRES,\n tests_require=TEST_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-dialogs/setup.py"}]}
1203
141
gh_patches_debug_21491
rasdani/github-patches
git_diff
plone__Products.CMFPlone-2793
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- verifydb fails with debug in some cases Using `./bin/instance verifydb -D` to check the `Data.fs` it fails in some cases: ``` Traceback (most recent call last): File "/Users/pbauer/.cache/buildout/eggs/Products.CMFPlone-5.2rc1-py3.7.egg/Products/CMFPlone/_scripts/verifydb.py", line 68, in verify_record class_info = unpickler.load() File "/Users/pbauer/.cache/buildout/eggs/ZODB-5.5.1-py3.7.egg/ZODB/_compat.py", line 62, in find_class return super(Unpickler, self).find_class(modulename, name) ModuleNotFoundError: No module named 'Products.Archetypes' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "./bin/instance", line 262, in <module> + sys.argv[1:])) File "/Users/pbauer/.cache/buildout/eggs/plone.recipe.zope2instance-6.1.3-py3.7.egg/plone/recipe/zope2instance/ctl.py", line 937, in main c.onecmd(' '.join(options.args)) File "/usr/local/Cellar/python/3.7.2_2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/cmd.py", line 217, in onecmd return func(arg) File "/Users/pbauer/.cache/buildout/eggs/Products.CMFPlone-5.2rc1-py3.7.egg/Products/CMFPlone/_scripts/verifydb.py", line 31, in zopectl_entry verify_zodb(app, debug=options.debug) File "/Users/pbauer/.cache/buildout/eggs/Products.CMFPlone-5.2rc1-py3.7.egg/Products/CMFPlone/_scripts/verifydb.py", line 50, in verify_zodb success = verify_record(oid, data, debug) File "/Users/pbauer/.cache/buildout/eggs/Products.CMFPlone-5.2rc1-py3.7.egg/Products/CMFPlone/_scripts/verifydb.py", line 82, in verify_record pickletools.dis(pickle[pos:]) UnboundLocalError: local variable 'pos' referenced before assignment ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `Products/CMFPlone/_scripts/verifydb.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 from Zope2.Startup.run import make_wsgi_app 3 from ZODB.interfaces import IStorageCurrentRecordIteration 4 from ZODB.serialize import PersistentUnpickler 5 6 import argparse 7 import io 8 import logging 9 import pdb 10 import pickletools 11 import sys 12 import traceback 13 import Zope2 14 15 logger = logging.getLogger('zodbverify') 16 17 18 def zopectl_entry(self, arg): 19 parser = argparse.ArgumentParser( 20 prog=sys.argv[0] + ' verifydb', 21 description='Verifies that all records in the database can be loaded.', 22 ) 23 parser.add_argument( 24 '-D', '--debug', action='store_true', dest='debug', 25 help='pause to debug broken pickles') 26 options = parser.parse_args(arg.split(' ') if arg else []) 27 28 logging.basicConfig(level=logging.INFO) 29 make_wsgi_app({}, self.options.configfile) 30 app = Zope2.app() 31 verify_zodb(app, debug=options.debug) 32 33 34 def verify_zodb(obj, debug=False): 35 storage = obj._p_jar._db._storage 36 if not IStorageCurrentRecordIteration.providedBy(storage): 37 raise TypeError( 38 'ZODB storage {} does not implement record_iternext'.format( 39 storage)) 40 41 logger.info('Scanning ZODB...') 42 43 next_ = None 44 count = 0 45 errors = 0 46 while True: 47 count += 1 48 oid, tid, data, next_ = storage.record_iternext(next_) 49 logger.debug('Verifying {}'.format(oid)) 50 success = verify_record(oid, data, debug) 51 if not success: 52 errors += 1 53 if next_ is None: 54 break 55 56 logger.info( 57 'Done! Scanned {} records. 
' 58 'Found {} records that could not be loaded.'.format( 59 count, errors) 60 ) 61 62 63 def verify_record(oid, data, debug=False): 64 input_file = io.BytesIO(data) 65 unpickler = PersistentUnpickler(None, persistent_load, input_file) 66 class_info = 'unknown' 67 try: 68 class_info = unpickler.load() 69 pos = input_file.tell() 70 unpickler.load() 71 except Exception: 72 input_file.seek(0) 73 pickle = input_file.read() 74 logger.info('\nCould not process {} record {}:'.format( 75 class_info, 76 repr(oid), 77 )) 78 logger.info(repr(pickle)) 79 logger.info(traceback.format_exc()) 80 if debug: 81 try: 82 pickletools.dis(pickle[pos:]) 83 finally: 84 pdb.set_trace() 85 return False 86 return True 87 88 89 def persistent_load(ref): 90 pass 91 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/Products/CMFPlone/_scripts/verifydb.py b/Products/CMFPlone/_scripts/verifydb.py
--- a/Products/CMFPlone/_scripts/verifydb.py
+++ b/Products/CMFPlone/_scripts/verifydb.py
@@ -64,6 +64,7 @@
     input_file = io.BytesIO(data)
     unpickler = PersistentUnpickler(None, persistent_load, input_file)
     class_info = 'unknown'
+    pos = None
    try:
         class_info = unpickler.load()
         pos = input_file.tell()
@@ -77,11 +78,17 @@
         ))
         logger.info(repr(pickle))
         logger.info(traceback.format_exc())
-        if debug:
+        if debug and pos is not None:
             try:
                 pickletools.dis(pickle[pos:])
+            except Exception:
+                # ignore exceptions while disassembling the pickle since the
+                # real issue is that it references a unavailable module
+                pass
             finally:
                 pdb.set_trace()
+        elif debug and pos is None:
+            pdb.set_trace()
         return False
     return True
{"golden_diff": "diff --git a/Products/CMFPlone/_scripts/verifydb.py b/Products/CMFPlone/_scripts/verifydb.py\n--- a/Products/CMFPlone/_scripts/verifydb.py\n+++ b/Products/CMFPlone/_scripts/verifydb.py\n@@ -64,6 +64,7 @@\n input_file = io.BytesIO(data)\n unpickler = PersistentUnpickler(None, persistent_load, input_file)\n class_info = 'unknown'\n+ pos = None\n try:\n class_info = unpickler.load()\n pos = input_file.tell()\n@@ -77,11 +78,17 @@\n ))\n logger.info(repr(pickle))\n logger.info(traceback.format_exc())\n- if debug:\n+ if debug and pos is not None:\n try:\n pickletools.dis(pickle[pos:])\n+ except Exception:\n+ # ignore exceptions while disassembling the pickle since the\n+ # real issue is that it references a unavailable module\n+ pass\n finally:\n pdb.set_trace()\n+ elif debug and pos is None:\n+ pdb.set_trace()\n return False\n return True\n", "issue": "verifydb fails with debug in some cases\nUsing `./bin/instance verifydb -D` to check the `Data.fs` it fails in some cases:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/pbauer/.cache/buildout/eggs/Products.CMFPlone-5.2rc1-py3.7.egg/Products/CMFPlone/_scripts/verifydb.py\", line 68, in verify_record\r\n class_info = unpickler.load()\r\n File \"/Users/pbauer/.cache/buildout/eggs/ZODB-5.5.1-py3.7.egg/ZODB/_compat.py\", line 62, in find_class\r\n return super(Unpickler, self).find_class(modulename, name)\r\nModuleNotFoundError: No module named 'Products.Archetypes'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"./bin/instance\", line 262, in <module>\r\n + sys.argv[1:]))\r\n File \"/Users/pbauer/.cache/buildout/eggs/plone.recipe.zope2instance-6.1.3-py3.7.egg/plone/recipe/zope2instance/ctl.py\", line 937, in main\r\n c.onecmd(' '.join(options.args))\r\n File \"/usr/local/Cellar/python/3.7.2_2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/cmd.py\", line 217, in onecmd\r\n return func(arg)\r\n File \"/Users/pbauer/.cache/buildout/eggs/Products.CMFPlone-5.2rc1-py3.7.egg/Products/CMFPlone/_scripts/verifydb.py\", line 31, in zopectl_entry\r\n verify_zodb(app, debug=options.debug)\r\n File \"/Users/pbauer/.cache/buildout/eggs/Products.CMFPlone-5.2rc1-py3.7.egg/Products/CMFPlone/_scripts/verifydb.py\", line 50, in verify_zodb\r\n success = verify_record(oid, data, debug)\r\n File \"/Users/pbauer/.cache/buildout/eggs/Products.CMFPlone-5.2rc1-py3.7.egg/Products/CMFPlone/_scripts/verifydb.py\", line 82, in verify_record\r\n pickletools.dis(pickle[pos:])\r\nUnboundLocalError: local variable 'pos' referenced before assignment\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom Zope2.Startup.run import make_wsgi_app\nfrom ZODB.interfaces import IStorageCurrentRecordIteration\nfrom ZODB.serialize import PersistentUnpickler\n\nimport argparse\nimport io\nimport logging\nimport pdb\nimport pickletools\nimport sys\nimport traceback\nimport Zope2\n\nlogger = logging.getLogger('zodbverify')\n\n\ndef zopectl_entry(self, arg):\n parser = argparse.ArgumentParser(\n prog=sys.argv[0] + ' verifydb',\n description='Verifies that all records in the database can be loaded.',\n )\n parser.add_argument(\n '-D', '--debug', action='store_true', dest='debug',\n help='pause to debug broken pickles')\n options = parser.parse_args(arg.split(' ') if arg else [])\n\n logging.basicConfig(level=logging.INFO)\n make_wsgi_app({}, self.options.configfile)\n app = Zope2.app()\n verify_zodb(app, debug=options.debug)\n\n\ndef verify_zodb(obj, 
debug=False):\n storage = obj._p_jar._db._storage\n if not IStorageCurrentRecordIteration.providedBy(storage):\n raise TypeError(\n 'ZODB storage {} does not implement record_iternext'.format(\n storage))\n\n logger.info('Scanning ZODB...')\n\n next_ = None\n count = 0\n errors = 0\n while True:\n count += 1\n oid, tid, data, next_ = storage.record_iternext(next_)\n logger.debug('Verifying {}'.format(oid))\n success = verify_record(oid, data, debug)\n if not success:\n errors += 1\n if next_ is None:\n break\n\n logger.info(\n 'Done! Scanned {} records. '\n 'Found {} records that could not be loaded.'.format(\n count, errors)\n )\n\n\ndef verify_record(oid, data, debug=False):\n input_file = io.BytesIO(data)\n unpickler = PersistentUnpickler(None, persistent_load, input_file)\n class_info = 'unknown'\n try:\n class_info = unpickler.load()\n pos = input_file.tell()\n unpickler.load()\n except Exception:\n input_file.seek(0)\n pickle = input_file.read()\n logger.info('\\nCould not process {} record {}:'.format(\n class_info,\n repr(oid),\n ))\n logger.info(repr(pickle))\n logger.info(traceback.format_exc())\n if debug:\n try:\n pickletools.dis(pickle[pos:])\n finally:\n pdb.set_trace()\n return False\n return True\n\n\ndef persistent_load(ref):\n pass\n", "path": "Products/CMFPlone/_scripts/verifydb.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom Zope2.Startup.run import make_wsgi_app\nfrom ZODB.interfaces import IStorageCurrentRecordIteration\nfrom ZODB.serialize import PersistentUnpickler\n\nimport argparse\nimport io\nimport logging\nimport pdb\nimport pickletools\nimport sys\nimport traceback\nimport Zope2\n\nlogger = logging.getLogger('zodbverify')\n\n\ndef zopectl_entry(self, arg):\n parser = argparse.ArgumentParser(\n prog=sys.argv[0] + ' verifydb',\n description='Verifies that all records in the database can be loaded.',\n )\n parser.add_argument(\n '-D', '--debug', action='store_true', dest='debug',\n help='pause to debug broken pickles')\n options = parser.parse_args(arg.split(' ') if arg else [])\n\n logging.basicConfig(level=logging.INFO)\n make_wsgi_app({}, self.options.configfile)\n app = Zope2.app()\n verify_zodb(app, debug=options.debug)\n\n\ndef verify_zodb(obj, debug=False):\n storage = obj._p_jar._db._storage\n if not IStorageCurrentRecordIteration.providedBy(storage):\n raise TypeError(\n 'ZODB storage {} does not implement record_iternext'.format(\n storage))\n\n logger.info('Scanning ZODB...')\n\n next_ = None\n count = 0\n errors = 0\n while True:\n count += 1\n oid, tid, data, next_ = storage.record_iternext(next_)\n logger.debug('Verifying {}'.format(oid))\n success = verify_record(oid, data, debug)\n if not success:\n errors += 1\n if next_ is None:\n break\n\n logger.info(\n 'Done! Scanned {} records. 
'\n 'Found {} records that could not be loaded.'.format(\n count, errors)\n )\n\n\ndef verify_record(oid, data, debug=False):\n input_file = io.BytesIO(data)\n unpickler = PersistentUnpickler(None, persistent_load, input_file)\n class_info = 'unknown'\n pos = None\n try:\n class_info = unpickler.load()\n pos = input_file.tell()\n unpickler.load()\n except Exception:\n input_file.seek(0)\n pickle = input_file.read()\n logger.info('\\nCould not process {} record {}:'.format(\n class_info,\n repr(oid),\n ))\n logger.info(repr(pickle))\n logger.info(traceback.format_exc())\n if debug and pos is not None:\n try:\n pickletools.dis(pickle[pos:])\n except Exception:\n # ignore exceptions while disassembling the pickle since the\n # real issue is that it references a unavailable module\n pass\n finally:\n pdb.set_trace()\n elif debug and pos is None:\n pdb.set_trace()\n return False\n return True\n\n\ndef persistent_load(ref):\n pass\n", "path": "Products/CMFPlone/_scripts/verifydb.py"}]}
1572
259
gh_patches_debug_20175
rasdani/github-patches
git_diff
bridgecrewio__checkov-3750
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- CKV_AZURE_9 & CKV_AZURE_10 - Scan fails if protocol value is a wildcard **Describe the issue** CKV_AZURE_9 & CKV_AZURE_10 When scanning Bicep files the checks are looking for a protocol value of `tcp` and fail to catch when `*` is used. **Examples** The following bicep code fails to produce a finding for CKV_AZURE_9 & CKV_AZURE_10 ``` resource nsg 'Microsoft.Network/networkSecurityGroups@2021-05-01' = { name: nsgName location: nsgLocation properties: { securityRules: [ { name: 'badrule' properties: { access: 'Allow' destinationAddressPrefix: '*' destinationPortRange: '*' direction: 'Inbound' priority: 100 protocol: '*' sourceAddressPrefix: '*' sourcePortRange: '*' } } ] } } ``` While this works as expected: ``` resource nsg 'Microsoft.Network/networkSecurityGroups@2021-05-01' = { name: nsgName location: nsgLocation properties: { securityRules: [ { name: 'badrule' properties: { access: 'Allow' destinationAddressPrefix: '*' destinationPortRange: '*' direction: 'Inbound' priority: 100 protocol: 'tcp' sourceAddressPrefix: '*' sourcePortRange: '*' } } ] } } ``` **Version (please complete the following information):** - docker container 2.2.0 **Additional context** A similar problem existed for Terraform that was previously fixed (see https://github.com/bridgecrewio/checkov/issues/601) I believe the relevant lines is: https://github.com/bridgecrewio/checkov/blob/master/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py#LL48C4-L48C117 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checkov/arm/checks/resource/NSGRulePortAccessRestricted.py` Content: ``` 1 import re 2 from typing import Union, Dict, Any 3 4 from checkov.common.models.enums import CheckResult, CheckCategories 5 from checkov.arm.base_resource_check import BaseResourceCheck 6 7 # https://docs.microsoft.com/en-us/azure/templates/microsoft.network/networksecuritygroups 8 # https://docs.microsoft.com/en-us/azure/templates/microsoft.network/networksecuritygroups/securityrules 9 10 INTERNET_ADDRESSES = ["*", "0.0.0.0", "<nw>/0", "/0", "internet", "any"] # nosec 11 PORT_RANGE = re.compile(r"\d+-\d+") 12 13 14 class NSGRulePortAccessRestricted(BaseResourceCheck): 15 def __init__(self, name: str, check_id: str, port: int) -> None: 16 supported_resources = ( 17 "Microsoft.Network/networkSecurityGroups", 18 "Microsoft.Network/networkSecurityGroups/securityRules", 19 ) 20 categories = (CheckCategories.NETWORKING,) 21 super().__init__(name=name, id=check_id, categories=categories, supported_resources=supported_resources) 22 self.port = port 23 24 def is_port_in_range(self, port_range: Union[int, str]) -> bool: 25 if re.match(PORT_RANGE, str(port_range)): 26 start, end = int(port_range.split("-")[0]), int(port_range.split("-")[1]) 27 if start <= self.port <= end: 28 return True 29 if port_range in (str(self.port), "*"): 30 return True 31 return False 32 33 def scan_resource_conf(self, conf: Dict[str, Any]) -> CheckResult: 34 if "properties" in conf: 35 securityRules = [] 36 if self.entity_type == "Microsoft.Network/networkSecurityGroups": 37 if "securityRules" in conf["properties"]: 38 securityRules.extend(conf["properties"]["securityRules"]) 39 if self.entity_type == "Microsoft.Network/networkSecurityGroups/securityRules": 40 securityRules.append(conf) 41 42 for rule in securityRules: 43 portRanges = [] 44 sourcePrefixes = [] 45 
if "properties" in rule: 46 if "access" in rule["properties"] and rule["properties"]["access"].lower() == "allow": 47 if "direction" in rule["properties"] and rule["properties"]["direction"].lower() == "inbound": 48 if "protocol" in rule["properties"] and rule["properties"]["protocol"].lower() == "tcp": 49 if "destinationPortRanges" in rule["properties"]: 50 portRanges.extend(rule["properties"]["destinationPortRanges"]) 51 if "destinationPortRange" in rule["properties"]: 52 portRanges.append(rule["properties"]["destinationPortRange"]) 53 54 if "sourceAddressPrefixes" in rule["properties"]: 55 sourcePrefixes.extend(rule["properties"]["sourceAddressPrefixes"]) 56 if "sourceAddressPrefix" in rule["properties"]: 57 sourcePrefixes.append(rule["properties"]["sourceAddressPrefix"]) 58 59 for portRange in portRanges: 60 if self.is_port_in_range(portRange): 61 for prefix in sourcePrefixes: 62 if prefix in INTERNET_ADDRESSES: 63 return CheckResult.FAILED 64 65 return CheckResult.PASSED 66 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py b/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py --- a/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py +++ b/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py @@ -45,7 +45,7 @@ if "properties" in rule: if "access" in rule["properties"] and rule["properties"]["access"].lower() == "allow": if "direction" in rule["properties"] and rule["properties"]["direction"].lower() == "inbound": - if "protocol" in rule["properties"] and rule["properties"]["protocol"].lower() == "tcp": + if "protocol" in rule["properties"] and rule["properties"]["protocol"].lower() in ("tcp", "*"): if "destinationPortRanges" in rule["properties"]: portRanges.extend(rule["properties"]["destinationPortRanges"]) if "destinationPortRange" in rule["properties"]:
{"golden_diff": "diff --git a/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py b/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py\n--- a/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py\n+++ b/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py\n@@ -45,7 +45,7 @@\n if \"properties\" in rule:\n if \"access\" in rule[\"properties\"] and rule[\"properties\"][\"access\"].lower() == \"allow\":\n if \"direction\" in rule[\"properties\"] and rule[\"properties\"][\"direction\"].lower() == \"inbound\":\n- if \"protocol\" in rule[\"properties\"] and rule[\"properties\"][\"protocol\"].lower() == \"tcp\":\n+ if \"protocol\" in rule[\"properties\"] and rule[\"properties\"][\"protocol\"].lower() in (\"tcp\", \"*\"):\n if \"destinationPortRanges\" in rule[\"properties\"]:\n portRanges.extend(rule[\"properties\"][\"destinationPortRanges\"])\n if \"destinationPortRange\" in rule[\"properties\"]:\n", "issue": "CKV_AZURE_9 & CKV_AZURE_10 - Scan fails if protocol value is a wildcard\n**Describe the issue**\r\nCKV_AZURE_9 & CKV_AZURE_10\r\n\r\nWhen scanning Bicep files the checks are looking for a protocol value of `tcp` and fail to catch when `*` is used.\r\n\r\n**Examples**\r\n\r\nThe following bicep code fails to produce a finding for CKV_AZURE_9 & CKV_AZURE_10\r\n```\r\nresource nsg 'Microsoft.Network/networkSecurityGroups@2021-05-01' = {\r\n name: nsgName\r\n location: nsgLocation\r\n properties: {\r\n securityRules: [\r\n {\r\n name: 'badrule'\r\n properties: {\r\n access: 'Allow'\r\n destinationAddressPrefix: '*'\r\n destinationPortRange: '*'\r\n direction: 'Inbound'\r\n priority: 100\r\n protocol: '*'\r\n sourceAddressPrefix: '*'\r\n sourcePortRange: '*'\r\n }\r\n }\r\n ]\r\n }\r\n}\r\n```\r\n\r\nWhile this works as expected:\r\n```\r\nresource nsg 'Microsoft.Network/networkSecurityGroups@2021-05-01' = {\r\n name: nsgName\r\n location: nsgLocation\r\n properties: {\r\n securityRules: [\r\n {\r\n name: 'badrule'\r\n properties: {\r\n access: 'Allow'\r\n destinationAddressPrefix: '*'\r\n destinationPortRange: '*'\r\n direction: 'Inbound'\r\n priority: 100\r\n protocol: 'tcp'\r\n sourceAddressPrefix: '*'\r\n sourcePortRange: '*'\r\n }\r\n }\r\n ]\r\n }\r\n}\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - docker container 2.2.0\r\n\r\n**Additional context**\r\nA similar problem existed for Terraform that was previously fixed (see https://github.com/bridgecrewio/checkov/issues/601) \r\n\r\nI believe the relevant lines is: \r\nhttps://github.com/bridgecrewio/checkov/blob/master/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py#LL48C4-L48C117\r\n\r\n\n", "before_files": [{"content": "import re\nfrom typing import Union, Dict, Any\n\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.arm.base_resource_check import BaseResourceCheck\n\n# https://docs.microsoft.com/en-us/azure/templates/microsoft.network/networksecuritygroups\n# https://docs.microsoft.com/en-us/azure/templates/microsoft.network/networksecuritygroups/securityrules\n\nINTERNET_ADDRESSES = [\"*\", \"0.0.0.0\", \"<nw>/0\", \"/0\", \"internet\", \"any\"] # nosec\nPORT_RANGE = re.compile(r\"\\d+-\\d+\")\n\n\nclass NSGRulePortAccessRestricted(BaseResourceCheck):\n def __init__(self, name: str, check_id: str, port: int) -> None:\n supported_resources = (\n \"Microsoft.Network/networkSecurityGroups\",\n \"Microsoft.Network/networkSecurityGroups/securityRules\",\n )\n categories = (CheckCategories.NETWORKING,)\n super().__init__(name=name, 
id=check_id, categories=categories, supported_resources=supported_resources)\n self.port = port\n\n def is_port_in_range(self, port_range: Union[int, str]) -> bool:\n if re.match(PORT_RANGE, str(port_range)):\n start, end = int(port_range.split(\"-\")[0]), int(port_range.split(\"-\")[1])\n if start <= self.port <= end:\n return True\n if port_range in (str(self.port), \"*\"):\n return True\n return False\n\n def scan_resource_conf(self, conf: Dict[str, Any]) -> CheckResult:\n if \"properties\" in conf:\n securityRules = []\n if self.entity_type == \"Microsoft.Network/networkSecurityGroups\":\n if \"securityRules\" in conf[\"properties\"]:\n securityRules.extend(conf[\"properties\"][\"securityRules\"])\n if self.entity_type == \"Microsoft.Network/networkSecurityGroups/securityRules\":\n securityRules.append(conf)\n\n for rule in securityRules:\n portRanges = []\n sourcePrefixes = []\n if \"properties\" in rule:\n if \"access\" in rule[\"properties\"] and rule[\"properties\"][\"access\"].lower() == \"allow\":\n if \"direction\" in rule[\"properties\"] and rule[\"properties\"][\"direction\"].lower() == \"inbound\":\n if \"protocol\" in rule[\"properties\"] and rule[\"properties\"][\"protocol\"].lower() == \"tcp\":\n if \"destinationPortRanges\" in rule[\"properties\"]:\n portRanges.extend(rule[\"properties\"][\"destinationPortRanges\"])\n if \"destinationPortRange\" in rule[\"properties\"]:\n portRanges.append(rule[\"properties\"][\"destinationPortRange\"])\n\n if \"sourceAddressPrefixes\" in rule[\"properties\"]:\n sourcePrefixes.extend(rule[\"properties\"][\"sourceAddressPrefixes\"])\n if \"sourceAddressPrefix\" in rule[\"properties\"]:\n sourcePrefixes.append(rule[\"properties\"][\"sourceAddressPrefix\"])\n\n for portRange in portRanges:\n if self.is_port_in_range(portRange):\n for prefix in sourcePrefixes:\n if prefix in INTERNET_ADDRESSES:\n return CheckResult.FAILED\n\n return CheckResult.PASSED\n", "path": "checkov/arm/checks/resource/NSGRulePortAccessRestricted.py"}], "after_files": [{"content": "import re\nfrom typing import Union, Dict, Any\n\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.arm.base_resource_check import BaseResourceCheck\n\n# https://docs.microsoft.com/en-us/azure/templates/microsoft.network/networksecuritygroups\n# https://docs.microsoft.com/en-us/azure/templates/microsoft.network/networksecuritygroups/securityrules\n\nINTERNET_ADDRESSES = [\"*\", \"0.0.0.0\", \"<nw>/0\", \"/0\", \"internet\", \"any\"] # nosec\nPORT_RANGE = re.compile(r\"\\d+-\\d+\")\n\n\nclass NSGRulePortAccessRestricted(BaseResourceCheck):\n def __init__(self, name: str, check_id: str, port: int) -> None:\n supported_resources = (\n \"Microsoft.Network/networkSecurityGroups\",\n \"Microsoft.Network/networkSecurityGroups/securityRules\",\n )\n categories = (CheckCategories.NETWORKING,)\n super().__init__(name=name, id=check_id, categories=categories, supported_resources=supported_resources)\n self.port = port\n\n def is_port_in_range(self, port_range: Union[int, str]) -> bool:\n if re.match(PORT_RANGE, str(port_range)):\n start, end = int(port_range.split(\"-\")[0]), int(port_range.split(\"-\")[1])\n if start <= self.port <= end:\n return True\n if port_range in (str(self.port), \"*\"):\n return True\n return False\n\n def scan_resource_conf(self, conf: Dict[str, Any]) -> CheckResult:\n if \"properties\" in conf:\n securityRules = []\n if self.entity_type == \"Microsoft.Network/networkSecurityGroups\":\n if \"securityRules\" in conf[\"properties\"]:\n 
securityRules.extend(conf[\"properties\"][\"securityRules\"])\n if self.entity_type == \"Microsoft.Network/networkSecurityGroups/securityRules\":\n securityRules.append(conf)\n\n for rule in securityRules:\n portRanges = []\n sourcePrefixes = []\n if \"properties\" in rule:\n if \"access\" in rule[\"properties\"] and rule[\"properties\"][\"access\"].lower() == \"allow\":\n if \"direction\" in rule[\"properties\"] and rule[\"properties\"][\"direction\"].lower() == \"inbound\":\n if \"protocol\" in rule[\"properties\"] and rule[\"properties\"][\"protocol\"].lower() in (\"tcp\", \"*\"):\n if \"destinationPortRanges\" in rule[\"properties\"]:\n portRanges.extend(rule[\"properties\"][\"destinationPortRanges\"])\n if \"destinationPortRange\" in rule[\"properties\"]:\n portRanges.append(rule[\"properties\"][\"destinationPortRange\"])\n\n if \"sourceAddressPrefixes\" in rule[\"properties\"]:\n sourcePrefixes.extend(rule[\"properties\"][\"sourceAddressPrefixes\"])\n if \"sourceAddressPrefix\" in rule[\"properties\"]:\n sourcePrefixes.append(rule[\"properties\"][\"sourceAddressPrefix\"])\n\n for portRange in portRanges:\n if self.is_port_in_range(portRange):\n for prefix in sourcePrefixes:\n if prefix in INTERNET_ADDRESSES:\n return CheckResult.FAILED\n\n return CheckResult.PASSED\n", "path": "checkov/arm/checks/resource/NSGRulePortAccessRestricted.py"}]}
1,516
221
gh_patches_debug_5785
rasdani/github-patches
git_diff
ivy-llc__ivy-17476
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- matrix_power --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/frontends/paddle/tensor/linalg.py` Content: ``` 1 # global 2 import ivy 3 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes 4 from ivy.functional.frontends.paddle import promote_types_of_paddle_inputs 5 from ivy.functional.frontends.paddle.func_wrapper import ( 6 to_ivy_arrays_and_back, 7 ) 8 9 10 @with_supported_dtypes( 11 {"2.4.2 and below": ("float32", "float64", "int32", "int64")}, "paddle" 12 ) 13 @to_ivy_arrays_and_back 14 def cross(x, y, /, *, axis=9, name=None): 15 x, y = promote_types_of_paddle_inputs(x, y) 16 return ivy.cross(x, y, axis=axis) 17 18 19 # matmul 20 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 21 @to_ivy_arrays_and_back 22 def matmul(x, y, transpose_x=False, transpose_y=False, name=None): 23 x, y = promote_types_of_paddle_inputs(x, y) 24 return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y) 25 26 27 # norm 28 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle") 29 @to_ivy_arrays_and_back 30 def norm(x, p="fro", axis=None, keepdim=False, name=None): 31 if axis is None and p is not None: 32 if p == "fro": 33 p = 2 34 ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1) 35 if keepdim: 36 ret = ret.reshape([1] * len(x.shape)) 37 if len(ret.shape) == 0: 38 return ivy.array([ret]) 39 return ret 40 41 if isinstance(axis, tuple): 42 axis = list(axis) 43 if isinstance(axis, list) and len(axis) == 1: 44 axis = axis[0] 45 46 if isinstance(axis, int): 47 if p == "fro": 48 p = 2 49 if p in [0, 1, 2, ivy.inf, -ivy.inf]: 50 ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim) 51 elif isinstance(p, (int, float)): 52 ret = ivy.pow( 53 ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim), 54 float(1.0 / p), 55 ) 56 57 elif isinstance(axis, list) and len(axis) == 2: 58 if p == 0: 59 raise ValueError 60 elif p == 1: 61 ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim) 62 elif p == 2 or p == "fro": 63 ret = ivy.matrix_norm(x, ord="fro", axis=axis, keepdims=keepdim) 64 elif p == ivy.inf: 65 ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim) 66 elif p == -ivy.inf: 67 ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim) 68 elif isinstance(p, (int, float)) and p > 0: 69 ret = ivy.pow( 70 ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim), 71 float(1.0 / p), 72 ) 73 else: 74 raise ValueError 75 76 else: 77 raise ValueError 78 79 if len(ret.shape) == 0: 80 ret = ivy.array( 81 [ret] 82 ) # this is done so as to match shape of output from paddle 83 return ret 84 85 86 # eig 87 @to_ivy_arrays_and_back 88 def eig(x, name=None): 89 return ivy.eig(x) 90 91 92 # eigvals 93 @to_ivy_arrays_and_back 94 def eigvals(x, name=None): 95 return ivy.eigvals(x) 96 97 98 # eigvalsh 99 @to_ivy_arrays_and_back 100 def eigvalsh(x, UPLO="L", name=None): 101 return ivy.eigvalsh(x, UPLO=UPLO) 102 103 104 # eigh 105 @to_ivy_arrays_and_back 106 def eigh(x, UPLO="L", name=None): 107 return ivy.eigh(x, UPLO=UPLO) 108 109 110 # pinv 111 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 112 @to_ivy_arrays_and_back 113 def pinv(x, rcond=1e-15, hermitian=False, name=None): 114 # TODO: Add hermitian functionality 115 return ivy.pinv(x, rtol=rcond) 116 117 118 # solve 119 
@with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 120 @to_ivy_arrays_and_back 121 def solve(x1, x2, name=None): 122 return ivy.solve(x1, x2) 123 124 125 # cholesky 126 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle") 127 @to_ivy_arrays_and_back 128 def cholesky(x, /, *, upper=False, name=None): 129 return ivy.cholesky(x, upper=upper) 130 131 132 # bmm 133 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 134 @to_ivy_arrays_and_back 135 def bmm(x, y, transpose_x=False, transpose_y=False, name=None): 136 if len(ivy.shape(x)) != 3 or len(ivy.shape(y)) != 3: 137 raise RuntimeError("input must be 3D matrices") 138 x, y = promote_types_of_paddle_inputs(x, y) 139 return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y) 140 141 142 # matrix_power 143 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 144 @to_ivy_arrays_and_back 145 def matrix_power(x, n, name=None): 146 return ivy.matrix_power(x, n) 147 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/frontends/paddle/tensor/linalg.py b/ivy/functional/frontends/paddle/tensor/linalg.py --- a/ivy/functional/frontends/paddle/tensor/linalg.py +++ b/ivy/functional/frontends/paddle/tensor/linalg.py @@ -130,3 +130,10 @@ raise RuntimeError("input must be 3D matrices") x, y = promote_types_of_paddle_inputs(x, y) return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y) + + +# matrix_power +@with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") +@to_ivy_arrays_and_back +def matrix_power(x, n, name=None): + return ivy.matrix_power(x, n)
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/linalg.py b/ivy/functional/frontends/paddle/tensor/linalg.py\n--- a/ivy/functional/frontends/paddle/tensor/linalg.py\n+++ b/ivy/functional/frontends/paddle/tensor/linalg.py\n@@ -130,3 +130,10 @@\n raise RuntimeError(\"input must be 3D matrices\")\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n+\n+\n+# matrix_power\n+@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n+@to_ivy_arrays_and_back\n+def matrix_power(x, n, name=None):\n+ return ivy.matrix_power(x, n)\n", "issue": " matrix_power\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle import promote_types_of_paddle_inputs\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef cross(x, y, /, *, axis=9, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.cross(x, y, axis=axis)\n\n\n# matmul\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matmul(x, y, transpose_x=False, transpose_y=False, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# norm\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef norm(x, p=\"fro\", axis=None, keepdim=False, name=None):\n if axis is None and p is not None:\n if p == \"fro\":\n p = 2\n ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1)\n if keepdim:\n ret = ret.reshape([1] * len(x.shape))\n if len(ret.shape) == 0:\n return ivy.array([ret])\n return ret\n\n if isinstance(axis, tuple):\n axis = list(axis)\n if isinstance(axis, list) and len(axis) == 1:\n axis = axis[0]\n\n if isinstance(axis, int):\n if p == \"fro\":\n p = 2\n if p in [0, 1, 2, ivy.inf, -ivy.inf]:\n ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)):\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n\n elif isinstance(axis, list) and len(axis) == 2:\n if p == 0:\n raise ValueError\n elif p == 1:\n ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == 2 or p == \"fro\":\n ret = ivy.matrix_norm(x, ord=\"fro\", axis=axis, keepdims=keepdim)\n elif p == ivy.inf:\n ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == -ivy.inf:\n ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)) and p > 0:\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n else:\n raise ValueError\n\n else:\n raise ValueError\n\n if len(ret.shape) == 0:\n ret = ivy.array(\n [ret]\n ) # this is done so as to match shape of output from paddle\n return ret\n\n\n# eig\n@to_ivy_arrays_and_back\ndef eig(x, name=None):\n return ivy.eig(x)\n\n\n# eigvals\n@to_ivy_arrays_and_back\ndef eigvals(x, name=None):\n return ivy.eigvals(x)\n\n\n# eigvalsh\n@to_ivy_arrays_and_back\ndef eigvalsh(x, UPLO=\"L\", name=None):\n return ivy.eigvalsh(x, UPLO=UPLO)\n\n\n# eigh\n@to_ivy_arrays_and_back\ndef eigh(x, UPLO=\"L\", name=None):\n return ivy.eigh(x, UPLO=UPLO)\n\n\n# 
pinv\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pinv(x, rcond=1e-15, hermitian=False, name=None):\n # TODO: Add hermitian functionality\n return ivy.pinv(x, rtol=rcond)\n\n\n# solve\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef solve(x1, x2, name=None):\n return ivy.solve(x1, x2)\n\n\n# cholesky\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cholesky(x, /, *, upper=False, name=None):\n return ivy.cholesky(x, upper=upper)\n\n\n# bmm\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef bmm(x, y, transpose_x=False, transpose_y=False, name=None):\n if len(ivy.shape(x)) != 3 or len(ivy.shape(y)) != 3:\n raise RuntimeError(\"input must be 3D matrices\")\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# matrix_power\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matrix_power(x, n, name=None):\n return ivy.matrix_power(x, n)\n", "path": "ivy/functional/frontends/paddle/tensor/linalg.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle import promote_types_of_paddle_inputs\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef cross(x, y, /, *, axis=9, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.cross(x, y, axis=axis)\n\n\n# matmul\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matmul(x, y, transpose_x=False, transpose_y=False, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# norm\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef norm(x, p=\"fro\", axis=None, keepdim=False, name=None):\n if axis is None and p is not None:\n if p == \"fro\":\n p = 2\n ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1)\n if keepdim:\n ret = ret.reshape([1] * len(x.shape))\n if len(ret.shape) == 0:\n return ivy.array([ret])\n return ret\n\n if isinstance(axis, tuple):\n axis = list(axis)\n if isinstance(axis, list) and len(axis) == 1:\n axis = axis[0]\n\n if isinstance(axis, int):\n if p == \"fro\":\n p = 2\n if p in [0, 1, 2, ivy.inf, -ivy.inf]:\n ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)):\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n\n elif isinstance(axis, list) and len(axis) == 2:\n if p == 0:\n raise ValueError\n elif p == 1:\n ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == 2 or p == \"fro\":\n ret = ivy.matrix_norm(x, ord=\"fro\", axis=axis, keepdims=keepdim)\n elif p == ivy.inf:\n ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == -ivy.inf:\n ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)) and p > 0:\n ret = ivy.pow(\n 
ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n else:\n raise ValueError\n\n else:\n raise ValueError\n\n if len(ret.shape) == 0:\n ret = ivy.array(\n [ret]\n ) # this is done so as to match shape of output from paddle\n return ret\n\n\n# eig\n@to_ivy_arrays_and_back\ndef eig(x, name=None):\n return ivy.eig(x)\n\n\n# eigvals\n@to_ivy_arrays_and_back\ndef eigvals(x, name=None):\n return ivy.eigvals(x)\n\n\n# eigvalsh\n@to_ivy_arrays_and_back\ndef eigvalsh(x, UPLO=\"L\", name=None):\n return ivy.eigvalsh(x, UPLO=UPLO)\n\n\n# eigh\n@to_ivy_arrays_and_back\ndef eigh(x, UPLO=\"L\", name=None):\n return ivy.eigh(x, UPLO=UPLO)\n\n\n# pinv\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pinv(x, rcond=1e-15, hermitian=False, name=None):\n # TODO: Add hermitian functionality\n return ivy.pinv(x, rtol=rcond)\n\n\n# cholesky\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cholesky(x, /, *, upper=False, name=None):\n return ivy.cholesky(x, upper=upper)\n\n\n# bmm\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef bmm(x, y, transpose_x=False, transpose_y=False, name=None):\n if len(ivy.shape(x)) != 3 or len(ivy.shape(y)) != 3:\n raise RuntimeError(\"input must be 3D matrices\")\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# matrix_power\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matrix_power(x, n, name=None):\n return ivy.matrix_power(x, n)\n", "path": "ivy/functional/frontends/paddle/tensor/linalg.py"}]}
1,992
196
gh_patches_debug_51715
rasdani/github-patches
git_diff
paperless-ngx__paperless-ngx-1358
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] Error starting gunicorn when IPv6 is disabled on host ### Description Paperless fails to launch in at least certain scenarios when IPv6 is disabled on the host. This was working before a change from listening on `0.0.0.0` to listening on `::`. ### Steps to reproduce Add the following to `/etc/sysctl.conf`: ``` net.ipv6.conf.all.disable_ipv6=1 net.ipv6.conf.default.disable_ipv6=1 net.ipv6.conf.lo.disable_ipv6=1 ``` Set `GRUB_CMDLINE_LINUX_DEFAULT="ipv6.disable=1"` in `/etc/default/grub`, and `update-grub`. Reboot and run paperless-ngx ### Webserver logs ```bash Paperless-ngx docker container starting... Mapping UID and GID for paperless:paperless to 1000:65538 usermod: no changes Creating directory /tmp/paperless Adjusting permissions of paperless files. This may take a while. Waiting for PostgreSQL to start... Waiting for Redis: redis://paperless-redis.paperless.svc:6379 Connected to Redis broker: redis://paperless-redis.paperless.svc:6379 Apply database migrations... Operations to perform: Apply all migrations: admin, auth, authtoken, contenttypes, django_q, documents, paperless_mail, sessions Running migrations: No migrations to apply. Executing /usr/local/bin/paperless_cmd.sh 2022-08-04 14:20:24,984 INFO Set uid to user 0 succeeded 2022-08-04 14:20:24,985 INFO supervisord started with pid 49 2022-08-04 14:20:25,988 INFO spawned: 'consumer' with pid 50 2022-08-04 14:20:25,990 INFO spawned: 'gunicorn' with pid 51 2022-08-04 14:20:25,992 INFO spawned: 'scheduler' with pid 52 [2022-08-04 10:20:26 -0400] [51] [INFO] Starting gunicorn 20.1.0 [2022-08-04 10:20:35 -0400] [72] [ERROR] Retrying in 1 second. [2022-08-04 10:20:36 -0400] [72] [ERROR] Retrying in 1 second. [2022-08-04 10:20:37 -0400] [72] [ERROR] Can't connect to ('::', 8000) 2022-08-04 14:20:37,727 INFO exited: gunicorn (exit status 1; not expected) 2022-08-04 14:20:38,730 INFO spawned: 'gunicorn' with pid 74 [2022-08-04 10:20:39 -0400] [74] [INFO] Starting gunicorn 20.1.0 [2022-08-04 10:20:39 -0400] [74] [ERROR] Retrying in 1 second. 2022-08-04 14:20:40,017 INFO success: gunicorn entered RUNNING state, process has stayed up for > than 1 seconds (startsecs) [2022-08-04 10:20:40 -0400] [74] [ERROR] Retrying in 1 second. [2022-08-04 10:20:41 -0400] [74] [ERROR] Retrying in 1 second. [2022-08-04 10:20:42 -0400] [74] [ERROR] Retrying in 1 second. [2022-08-04 10:20:43 -0400] [74] [ERROR] Retrying in 1 second. [2022-08-04 10:20:44 -0400] [74] [ERROR] Can't connect to ('::', 8000) 2022-08-04 14:20:44,069 INFO exited: gunicorn (exit status 1; not expected) ``` ### Paperless-ngx version 1.8.0 ### Host OS Ubuntu 22.04 x64, Microk8s 1.22 ### Installation method Docker - official image ### Browser _No response_ ### Configuration changes _No response_ ### Other This seems to be a regression from https://github.com/paperless-ngx/paperless-ngx/pull/924. Changing the listen IP back to 0.0.0.0 in gunicorn.conf.py causing it to bind correctly again --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `gunicorn.conf.py` Content: ``` 1 import os 2 3 bind = f'[::]:{os.getenv("PAPERLESS_PORT", 8000)}' 4 workers = int(os.getenv("PAPERLESS_WEBSERVER_WORKERS", 1)) 5 worker_class = "paperless.workers.ConfigurableWorker" 6 timeout = 120 7 8 9 def pre_fork(server, worker): 10 pass 11 12 13 def pre_exec(server): 14 server.log.info("Forked child, re-executing.") 15 16 17 def when_ready(server): 18 server.log.info("Server is ready. Spawning workers") 19 20 21 def worker_int(worker): 22 worker.log.info("worker received INT or QUIT signal") 23 24 ## get traceback info 25 import threading, sys, traceback 26 27 id2name = {th.ident: th.name for th in threading.enumerate()} 28 code = [] 29 for threadId, stack in sys._current_frames().items(): 30 code.append("\n# Thread: %s(%d)" % (id2name.get(threadId, ""), threadId)) 31 for filename, lineno, name, line in traceback.extract_stack(stack): 32 code.append('File: "%s", line %d, in %s' % (filename, lineno, name)) 33 if line: 34 code.append(" %s" % (line.strip())) 35 worker.log.debug("\n".join(code)) 36 37 38 def worker_abort(worker): 39 worker.log.info("worker received SIGABRT signal") 40 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gunicorn.conf.py b/gunicorn.conf.py --- a/gunicorn.conf.py +++ b/gunicorn.conf.py @@ -1,6 +1,6 @@ import os -bind = f'[::]:{os.getenv("PAPERLESS_PORT", 8000)}' +bind = f'{os.getenv("PAPERLESS_BIND_ADDR", "[::]")}:{os.getenv("PAPERLESS_PORT", 8000)}' workers = int(os.getenv("PAPERLESS_WEBSERVER_WORKERS", 1)) worker_class = "paperless.workers.ConfigurableWorker" timeout = 120
{"golden_diff": "diff --git a/gunicorn.conf.py b/gunicorn.conf.py\n--- a/gunicorn.conf.py\n+++ b/gunicorn.conf.py\n@@ -1,6 +1,6 @@\n import os\n \n-bind = f'[::]:{os.getenv(\"PAPERLESS_PORT\", 8000)}'\n+bind = f'{os.getenv(\"PAPERLESS_BIND_ADDR\", \"[::]\")}:{os.getenv(\"PAPERLESS_PORT\", 8000)}'\n workers = int(os.getenv(\"PAPERLESS_WEBSERVER_WORKERS\", 1))\n worker_class = \"paperless.workers.ConfigurableWorker\"\n timeout = 120\n", "issue": "[BUG] Error starting gunicorn when IPv6 is disabled on host\n### Description\n\nPaperless fails to launch in at least certain scenarios when IPv6 is disabled on the host. This was working before a change from listening on `0.0.0.0` to listening on `::`.\n\n### Steps to reproduce\n\nAdd the following to `/etc/sysctl.conf`:\r\n\r\n```\r\nnet.ipv6.conf.all.disable_ipv6=1\r\nnet.ipv6.conf.default.disable_ipv6=1\r\nnet.ipv6.conf.lo.disable_ipv6=1\r\n```\r\n\r\nSet `GRUB_CMDLINE_LINUX_DEFAULT=\"ipv6.disable=1\"` in `/etc/default/grub`, and `update-grub`.\r\n\r\nReboot and run paperless-ngx\n\n### Webserver logs\n\n```bash\nPaperless-ngx docker container starting...\r\nMapping UID and GID for paperless:paperless to 1000:65538\r\nusermod: no changes\r\nCreating directory /tmp/paperless\r\nAdjusting permissions of paperless files. This may take a while.\r\nWaiting for PostgreSQL to start...\r\nWaiting for Redis: redis://paperless-redis.paperless.svc:6379\r\nConnected to Redis broker: redis://paperless-redis.paperless.svc:6379\r\nApply database migrations...\r\nOperations to perform:\r\n Apply all migrations: admin, auth, authtoken, contenttypes, django_q, documents, paperless_mail, sessions\r\nRunning migrations:\r\n No migrations to apply.\r\nExecuting /usr/local/bin/paperless_cmd.sh\r\n2022-08-04 14:20:24,984 INFO Set uid to user 0 succeeded\r\n2022-08-04 14:20:24,985 INFO supervisord started with pid 49\r\n2022-08-04 14:20:25,988 INFO spawned: 'consumer' with pid 50\r\n2022-08-04 14:20:25,990 INFO spawned: 'gunicorn' with pid 51\r\n2022-08-04 14:20:25,992 INFO spawned: 'scheduler' with pid 52\r\n[2022-08-04 10:20:26 -0400] [51] [INFO] Starting gunicorn 20.1.0\r\n[2022-08-04 10:20:35 -0400] [72] [ERROR] Retrying in 1 second.\r\n[2022-08-04 10:20:36 -0400] [72] [ERROR] Retrying in 1 second.\r\n[2022-08-04 10:20:37 -0400] [72] [ERROR] Can't connect to ('::', 8000)\r\n2022-08-04 14:20:37,727 INFO exited: gunicorn (exit status 1; not expected)\r\n2022-08-04 14:20:38,730 INFO spawned: 'gunicorn' with pid 74\r\n[2022-08-04 10:20:39 -0400] [74] [INFO] Starting gunicorn 20.1.0\r\n[2022-08-04 10:20:39 -0400] [74] [ERROR] Retrying in 1 second.\r\n2022-08-04 14:20:40,017 INFO success: gunicorn entered RUNNING state, process has stayed up for > than 1 seconds (startsecs)\r\n[2022-08-04 10:20:40 -0400] [74] [ERROR] Retrying in 1 second.\r\n[2022-08-04 10:20:41 -0400] [74] [ERROR] Retrying in 1 second.\r\n[2022-08-04 10:20:42 -0400] [74] [ERROR] Retrying in 1 second.\r\n[2022-08-04 10:20:43 -0400] [74] [ERROR] Retrying in 1 second.\r\n[2022-08-04 10:20:44 -0400] [74] [ERROR] Can't connect to ('::', 8000)\r\n2022-08-04 14:20:44,069 INFO exited: gunicorn (exit status 1; not expected)\n```\n\n\n### Paperless-ngx version\n\n1.8.0\n\n### Host OS\n\nUbuntu 22.04 x64, Microk8s 1.22\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\n_No response_\n\n### Configuration changes\n\n_No response_\n\n### Other\n\nThis seems to be a regression from https://github.com/paperless-ngx/paperless-ngx/pull/924. 
Changing the listen IP back to 0.0.0.0 in gunicorn.conf.py causing it to bind correctly again\n", "before_files": [{"content": "import os\n\nbind = f'[::]:{os.getenv(\"PAPERLESS_PORT\", 8000)}'\nworkers = int(os.getenv(\"PAPERLESS_WEBSERVER_WORKERS\", 1))\nworker_class = \"paperless.workers.ConfigurableWorker\"\ntimeout = 120\n\n\ndef pre_fork(server, worker):\n pass\n\n\ndef pre_exec(server):\n server.log.info(\"Forked child, re-executing.\")\n\n\ndef when_ready(server):\n server.log.info(\"Server is ready. Spawning workers\")\n\n\ndef worker_int(worker):\n worker.log.info(\"worker received INT or QUIT signal\")\n\n ## get traceback info\n import threading, sys, traceback\n\n id2name = {th.ident: th.name for th in threading.enumerate()}\n code = []\n for threadId, stack in sys._current_frames().items():\n code.append(\"\\n# Thread: %s(%d)\" % (id2name.get(threadId, \"\"), threadId))\n for filename, lineno, name, line in traceback.extract_stack(stack):\n code.append('File: \"%s\", line %d, in %s' % (filename, lineno, name))\n if line:\n code.append(\" %s\" % (line.strip()))\n worker.log.debug(\"\\n\".join(code))\n\n\ndef worker_abort(worker):\n worker.log.info(\"worker received SIGABRT signal\")\n", "path": "gunicorn.conf.py"}], "after_files": [{"content": "import os\n\nbind = f'{os.getenv(\"PAPERLESS_BIND_ADDR\", \"[::]\")}:{os.getenv(\"PAPERLESS_PORT\", 8000)}'\nworkers = int(os.getenv(\"PAPERLESS_WEBSERVER_WORKERS\", 1))\nworker_class = \"paperless.workers.ConfigurableWorker\"\ntimeout = 120\n\n\ndef pre_fork(server, worker):\n pass\n\n\ndef pre_exec(server):\n server.log.info(\"Forked child, re-executing.\")\n\n\ndef when_ready(server):\n server.log.info(\"Server is ready. Spawning workers\")\n\n\ndef worker_int(worker):\n worker.log.info(\"worker received INT or QUIT signal\")\n\n ## get traceback info\n import threading, sys, traceback\n\n id2name = {th.ident: th.name for th in threading.enumerate()}\n code = []\n for threadId, stack in sys._current_frames().items():\n code.append(\"\\n# Thread: %s(%d)\" % (id2name.get(threadId, \"\"), threadId))\n for filename, lineno, name, line in traceback.extract_stack(stack):\n code.append('File: \"%s\", line %d, in %s' % (filename, lineno, name))\n if line:\n code.append(\" %s\" % (line.strip()))\n worker.log.debug(\"\\n\".join(code))\n\n\ndef worker_abort(worker):\n worker.log.info(\"worker received SIGABRT signal\")\n", "path": "gunicorn.conf.py"}]}
1,860
133
gh_patches_debug_15632
rasdani/github-patches
git_diff
getredash__redash-3362
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Clickhouse: password is optional but we try to access it anyway For Clickhouse type data sources, we don't require a password. But the code does require it by trying to directly access the value in the options dictionary, instead of using `get`: https://github.com/getredash/redash/blob/823e4ccdd6fcfee5d0df0d919d87af3100876549/redash/query_runner/clickhouse.py#L77 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `redash/query_runner/clickhouse.py` Content: ``` 1 import logging 2 import re 3 4 import requests 5 6 from redash.query_runner import * 7 from redash.utils import json_dumps, json_loads 8 9 logger = logging.getLogger(__name__) 10 11 12 class ClickHouse(BaseSQLQueryRunner): 13 noop_query = "SELECT 1" 14 15 @classmethod 16 def configuration_schema(cls): 17 return { 18 "type": "object", 19 "properties": { 20 "url": { 21 "type": "string", 22 "default": "http://127.0.0.1:8123" 23 }, 24 "user": { 25 "type": "string", 26 "default": "default" 27 }, 28 "password": { 29 "type": "string" 30 }, 31 "dbname": { 32 "type": "string", 33 "title": "Database Name" 34 }, 35 "timeout": { 36 "type": "number", 37 "title": "Request Timeout", 38 "default": 30 39 } 40 }, 41 "required": ["dbname"], 42 "secret": ["password"] 43 } 44 45 @classmethod 46 def type(cls): 47 return "clickhouse" 48 49 def _get_tables(self, schema): 50 query = "SELECT database, table, name FROM system.columns WHERE database NOT IN ('system')" 51 52 results, error = self.run_query(query, None) 53 54 if error is not None: 55 raise Exception("Failed getting schema.") 56 57 results = json_loads(results) 58 59 for row in results['rows']: 60 table_name = '{}.{}'.format(row['database'], row['table']) 61 62 if table_name not in schema: 63 schema[table_name] = {'name': table_name, 'columns': []} 64 65 schema[table_name]['columns'].append(row['name']) 66 67 return schema.values() 68 69 def _send_query(self, data, stream=False): 70 r = requests.post( 71 self.configuration['url'], 72 data=data.encode("utf-8"), 73 stream=stream, 74 timeout=self.configuration.get('timeout', 30), 75 params={ 76 'user': self.configuration['user'], 77 'password': self.configuration['password'], 78 'database': self.configuration['dbname'] 79 } 80 ) 81 if r.status_code != 200: 82 raise Exception(r.text) 83 # logging.warning(r.json()) 84 return r.json() 85 86 @staticmethod 87 def _define_column_type(column): 88 c = column.lower() 89 f = re.search(r'^nullable\((.*)\)$', c) 90 if f is not None: 91 c = f.group(1) 92 if c.startswith('int') or c.startswith('uint'): 93 return TYPE_INTEGER 94 elif c.startswith('float'): 95 return TYPE_FLOAT 96 elif c == 'datetime': 97 return TYPE_DATETIME 98 elif c == 'date': 99 return TYPE_DATE 100 else: 101 return TYPE_STRING 102 103 def _clickhouse_query(self, query): 104 query += '\nFORMAT JSON' 105 result = self._send_query(query) 106 columns = [] 107 columns_int64 = [] # db converts value to string if its type equals UInt64 108 columns_totals = {} 109 110 for r in result['meta']: 111 column_name = r['name'] 112 column_type = self._define_column_type(r['type']) 113 114 if r['type'] in ('Int64', 'UInt64', 'Nullable(Int64)', 'Nullable(UInt64)'): 115 columns_int64.append(column_name) 116 else: 117 columns_totals[column_name] = 'Total' if column_type == TYPE_STRING else None 118 119 columns.append({'name': column_name, 'friendly_name': 
column_name, 'type': column_type}) 120 121 rows = result['data'] 122 for row in rows: 123 for column in columns_int64: 124 try: 125 row[column] = int(row[column]) 126 except TypeError: 127 row[column] = None 128 129 if 'totals' in result: 130 totals = result['totals'] 131 for column, value in columns_totals.iteritems(): 132 totals[column] = value 133 rows.append(totals) 134 135 return {'columns': columns, 'rows': rows} 136 137 def run_query(self, query, user): 138 logger.debug("Clickhouse is about to execute query: %s", query) 139 if query == "": 140 json_data = None 141 error = "Query is empty" 142 return json_data, error 143 try: 144 q = self._clickhouse_query(query) 145 data = json_dumps(q) 146 error = None 147 except Exception as e: 148 data = None 149 logging.exception(e) 150 error = unicode(e) 151 return data, error 152 153 register(ClickHouse) 154 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/redash/query_runner/clickhouse.py b/redash/query_runner/clickhouse.py --- a/redash/query_runner/clickhouse.py +++ b/redash/query_runner/clickhouse.py @@ -68,13 +68,13 @@ def _send_query(self, data, stream=False): r = requests.post( - self.configuration['url'], + self.configuration.get('url', "http://127.0.0.1:8123"), data=data.encode("utf-8"), stream=stream, timeout=self.configuration.get('timeout', 30), params={ - 'user': self.configuration['user'], - 'password': self.configuration['password'], + 'user': self.configuration.get('user', "default"), + 'password': self.configuration.get('password', ""), 'database': self.configuration['dbname'] } )
{"golden_diff": "diff --git a/redash/query_runner/clickhouse.py b/redash/query_runner/clickhouse.py\n--- a/redash/query_runner/clickhouse.py\n+++ b/redash/query_runner/clickhouse.py\n@@ -68,13 +68,13 @@\n \n def _send_query(self, data, stream=False):\n r = requests.post(\n- self.configuration['url'],\n+ self.configuration.get('url', \"http://127.0.0.1:8123\"),\n data=data.encode(\"utf-8\"),\n stream=stream,\n timeout=self.configuration.get('timeout', 30),\n params={\n- 'user': self.configuration['user'],\n- 'password': self.configuration['password'],\n+ 'user': self.configuration.get('user', \"default\"),\n+ 'password': self.configuration.get('password', \"\"),\n 'database': self.configuration['dbname']\n }\n )\n", "issue": "Clickhouse: password is optional but we try to access it anyway\nFor Clickhouse type data sources, we don't require a password. But the code does require it by trying to directly access the value in the options dictionary, instead of using `get`:\r\n\r\nhttps://github.com/getredash/redash/blob/823e4ccdd6fcfee5d0df0d919d87af3100876549/redash/query_runner/clickhouse.py#L77\n", "before_files": [{"content": "import logging\nimport re\n\nimport requests\n\nfrom redash.query_runner import *\nfrom redash.utils import json_dumps, json_loads\n\nlogger = logging.getLogger(__name__)\n\n\nclass ClickHouse(BaseSQLQueryRunner):\n noop_query = \"SELECT 1\"\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"url\": {\n \"type\": \"string\",\n \"default\": \"http://127.0.0.1:8123\"\n },\n \"user\": {\n \"type\": \"string\",\n \"default\": \"default\"\n },\n \"password\": {\n \"type\": \"string\"\n },\n \"dbname\": {\n \"type\": \"string\",\n \"title\": \"Database Name\"\n },\n \"timeout\": {\n \"type\": \"number\",\n \"title\": \"Request Timeout\",\n \"default\": 30\n }\n },\n \"required\": [\"dbname\"],\n \"secret\": [\"password\"]\n }\n\n @classmethod\n def type(cls):\n return \"clickhouse\"\n\n def _get_tables(self, schema):\n query = \"SELECT database, table, name FROM system.columns WHERE database NOT IN ('system')\"\n\n results, error = self.run_query(query, None)\n\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results = json_loads(results)\n\n for row in results['rows']:\n table_name = '{}.{}'.format(row['database'], row['table'])\n\n if table_name not in schema:\n schema[table_name] = {'name': table_name, 'columns': []}\n\n schema[table_name]['columns'].append(row['name'])\n\n return schema.values()\n\n def _send_query(self, data, stream=False):\n r = requests.post(\n self.configuration['url'],\n data=data.encode(\"utf-8\"),\n stream=stream,\n timeout=self.configuration.get('timeout', 30),\n params={\n 'user': self.configuration['user'],\n 'password': self.configuration['password'],\n 'database': self.configuration['dbname']\n }\n )\n if r.status_code != 200:\n raise Exception(r.text)\n # logging.warning(r.json())\n return r.json()\n\n @staticmethod\n def _define_column_type(column):\n c = column.lower()\n f = re.search(r'^nullable\\((.*)\\)$', c)\n if f is not None:\n c = f.group(1)\n if c.startswith('int') or c.startswith('uint'):\n return TYPE_INTEGER\n elif c.startswith('float'):\n return TYPE_FLOAT\n elif c == 'datetime':\n return TYPE_DATETIME\n elif c == 'date':\n return TYPE_DATE\n else:\n return TYPE_STRING\n\n def _clickhouse_query(self, query):\n query += '\\nFORMAT JSON'\n result = self._send_query(query)\n columns = []\n columns_int64 = [] # db converts value to string if its type 
equals UInt64\n columns_totals = {}\n\n for r in result['meta']:\n column_name = r['name']\n column_type = self._define_column_type(r['type'])\n\n if r['type'] in ('Int64', 'UInt64', 'Nullable(Int64)', 'Nullable(UInt64)'):\n columns_int64.append(column_name)\n else:\n columns_totals[column_name] = 'Total' if column_type == TYPE_STRING else None\n\n columns.append({'name': column_name, 'friendly_name': column_name, 'type': column_type})\n\n rows = result['data']\n for row in rows:\n for column in columns_int64:\n try:\n row[column] = int(row[column])\n except TypeError:\n row[column] = None\n\n if 'totals' in result:\n totals = result['totals']\n for column, value in columns_totals.iteritems():\n totals[column] = value\n rows.append(totals)\n\n return {'columns': columns, 'rows': rows}\n\n def run_query(self, query, user):\n logger.debug(\"Clickhouse is about to execute query: %s\", query)\n if query == \"\":\n json_data = None\n error = \"Query is empty\"\n return json_data, error\n try:\n q = self._clickhouse_query(query)\n data = json_dumps(q)\n error = None\n except Exception as e:\n data = None\n logging.exception(e)\n error = unicode(e)\n return data, error\n\nregister(ClickHouse)\n", "path": "redash/query_runner/clickhouse.py"}], "after_files": [{"content": "import logging\nimport re\n\nimport requests\n\nfrom redash.query_runner import *\nfrom redash.utils import json_dumps, json_loads\n\nlogger = logging.getLogger(__name__)\n\n\nclass ClickHouse(BaseSQLQueryRunner):\n noop_query = \"SELECT 1\"\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"url\": {\n \"type\": \"string\",\n \"default\": \"http://127.0.0.1:8123\"\n },\n \"user\": {\n \"type\": \"string\",\n \"default\": \"default\"\n },\n \"password\": {\n \"type\": \"string\"\n },\n \"dbname\": {\n \"type\": \"string\",\n \"title\": \"Database Name\"\n },\n \"timeout\": {\n \"type\": \"number\",\n \"title\": \"Request Timeout\",\n \"default\": 30\n }\n },\n \"required\": [\"dbname\"],\n \"secret\": [\"password\"]\n }\n\n @classmethod\n def type(cls):\n return \"clickhouse\"\n\n def _get_tables(self, schema):\n query = \"SELECT database, table, name FROM system.columns WHERE database NOT IN ('system')\"\n\n results, error = self.run_query(query, None)\n\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results = json_loads(results)\n\n for row in results['rows']:\n table_name = '{}.{}'.format(row['database'], row['table'])\n\n if table_name not in schema:\n schema[table_name] = {'name': table_name, 'columns': []}\n\n schema[table_name]['columns'].append(row['name'])\n\n return schema.values()\n\n def _send_query(self, data, stream=False):\n r = requests.post(\n self.configuration.get('url', \"http://127.0.0.1:8123\"),\n data=data.encode(\"utf-8\"),\n stream=stream,\n timeout=self.configuration.get('timeout', 30),\n params={\n 'user': self.configuration.get('user', \"default\"),\n 'password': self.configuration.get('password', \"\"),\n 'database': self.configuration['dbname']\n }\n )\n if r.status_code != 200:\n raise Exception(r.text)\n # logging.warning(r.json())\n return r.json()\n\n @staticmethod\n def _define_column_type(column):\n c = column.lower()\n f = re.search(r'^nullable\\((.*)\\)$', c)\n if f is not None:\n c = f.group(1)\n if c.startswith('int') or c.startswith('uint'):\n return TYPE_INTEGER\n elif c.startswith('float'):\n return TYPE_FLOAT\n elif c == 'datetime':\n return TYPE_DATETIME\n elif c == 'date':\n return TYPE_DATE\n else:\n 
return TYPE_STRING\n\n def _clickhouse_query(self, query):\n query += '\\nFORMAT JSON'\n result = self._send_query(query)\n columns = []\n columns_int64 = [] # db converts value to string if its type equals UInt64\n columns_totals = {}\n\n for r in result['meta']:\n column_name = r['name']\n column_type = self._define_column_type(r['type'])\n\n if r['type'] in ('Int64', 'UInt64', 'Nullable(Int64)', 'Nullable(UInt64)'):\n columns_int64.append(column_name)\n else:\n columns_totals[column_name] = 'Total' if column_type == TYPE_STRING else None\n\n columns.append({'name': column_name, 'friendly_name': column_name, 'type': column_type})\n\n rows = result['data']\n for row in rows:\n for column in columns_int64:\n try:\n row[column] = int(row[column])\n except TypeError:\n row[column] = None\n\n if 'totals' in result:\n totals = result['totals']\n for column, value in columns_totals.iteritems():\n totals[column] = value\n rows.append(totals)\n\n return {'columns': columns, 'rows': rows}\n\n def run_query(self, query, user):\n logger.debug(\"Clickhouse is about to execute query: %s\", query)\n if query == \"\":\n json_data = None\n error = \"Query is empty\"\n return json_data, error\n try:\n q = self._clickhouse_query(query)\n data = json_dumps(q)\n error = None\n except Exception as e:\n data = None\n logging.exception(e)\n error = unicode(e)\n return data, error\n\nregister(ClickHouse)\n", "path": "redash/query_runner/clickhouse.py"}]}
1,720
200
gh_patches_debug_17760
rasdani/github-patches
git_diff
joke2k__faker-105
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Default locale to language if no territory given. It would be great that if faker was initialized with only a locale and no territory, that it would use a sensible default. For example I currently have to do the following if using something such as "en" instead of "en_US". ``` py from faker import Factory from faker import AVAILABLE_LOCALES locale = 'en' if locale not in AVAILABLE_LOCALES: locale = next(l for l in AVAILABLE_LOCALES if l.startswith(locale)) factory = Factory.create(locale) ``` This happens when using dynamic mock data in local development where django sets the locale to "en" because we do not define territories. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `faker/factory.py` Content: ``` 1 from __future__ import unicode_literals 2 from __future__ import absolute_import 3 import sys 4 from faker import DEFAULT_LOCALE, DEFAULT_PROVIDERS, AVAILABLE_LOCALES 5 from faker import Generator 6 from faker import providers as providers_mod 7 8 9 class Factory(object): 10 11 @classmethod 12 def create(cls, locale=None, providers=None, generator=None, **config): 13 14 # fix locale to package name 15 locale = locale.replace('-', '_') if locale else DEFAULT_LOCALE 16 if '_' in locale: 17 locale = locale[:2] + locale[2:].upper() 18 if locale not in AVAILABLE_LOCALES: 19 raise AttributeError('Invalid configuration for faker locale "{0}"'.format(locale)) 20 21 providers = providers or DEFAULT_PROVIDERS 22 23 faker = generator or Generator(**config) 24 faker.add_provider(providers_mod.BaseProvider) 25 for provider_name in providers: 26 27 provider_class, lang_found = cls._get_provider_class(provider_name, locale) 28 provider = provider_class(faker) 29 provider.__provider__ = provider_name 30 provider.__lang__ = lang_found 31 faker.add_provider(provider) 32 33 return faker 34 35 @classmethod 36 def _get_provider_class(cls, provider, locale=''): 37 38 provider_class = cls._find_provider_class(provider, locale) 39 40 if provider_class: 41 return provider_class, locale 42 43 if locale and locale != DEFAULT_LOCALE: 44 # fallback to default locale 45 provider_class = cls._find_provider_class(provider, DEFAULT_LOCALE) 46 if provider_class: 47 return provider_class, DEFAULT_LOCALE 48 49 # fallback to no locale 50 provider_class = cls._find_provider_class(provider) 51 if provider_class: 52 return provider_class, None 53 54 raise ValueError('Unable to find provider "{0}" with locale "{1}"'.format(provider, locale)) 55 56 @classmethod 57 def _find_provider_class(cls, provider, locale=''): 58 59 path = "{providers}{lang}.{provider}".format( 60 providers=providers_mod.__package__ or providers_mod.__name__, 61 lang='.' + locale if locale else '', 62 provider=provider 63 ) 64 65 try: 66 __import__(path) 67 except ImportError: 68 return None 69 70 return sys.modules[path].Provider 71 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/faker/factory.py b/faker/factory.py --- a/faker/factory.py +++ b/faker/factory.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals from __future__ import absolute_import +import locale as pylocale import sys from faker import DEFAULT_LOCALE, DEFAULT_PROVIDERS, AVAILABLE_LOCALES from faker import Generator @@ -13,8 +14,7 @@ # fix locale to package name locale = locale.replace('-', '_') if locale else DEFAULT_LOCALE - if '_' in locale: - locale = locale[:2] + locale[2:].upper() + locale = pylocale.normalize(locale).split('.')[0] if locale not in AVAILABLE_LOCALES: raise AttributeError('Invalid configuration for faker locale "{0}"'.format(locale))
{"golden_diff": "diff --git a/faker/factory.py b/faker/factory.py\n--- a/faker/factory.py\n+++ b/faker/factory.py\n@@ -1,5 +1,6 @@\n from __future__ import unicode_literals\n from __future__ import absolute_import\n+import locale as pylocale\n import sys\n from faker import DEFAULT_LOCALE, DEFAULT_PROVIDERS, AVAILABLE_LOCALES\n from faker import Generator\n@@ -13,8 +14,7 @@\n \n # fix locale to package name\n locale = locale.replace('-', '_') if locale else DEFAULT_LOCALE\n- if '_' in locale:\n- locale = locale[:2] + locale[2:].upper()\n+ locale = pylocale.normalize(locale).split('.')[0]\n if locale not in AVAILABLE_LOCALES:\n raise AttributeError('Invalid configuration for faker locale \"{0}\"'.format(locale))\n", "issue": "Default locale to language if no territory given.\nIt would be great that if faker was initialized with only a locale and no territory, that it would use a sensible default.\n\nFor example I currently have to do the following if using something such as \"en\" instead of \"en_US\". \n\n``` py\nfrom faker import Factory\nfrom faker import AVAILABLE_LOCALES\n\nlocale = 'en'\nif locale not in AVAILABLE_LOCALES:\n locale = next(l for l in AVAILABLE_LOCALES if l.startswith(locale))\n\nfactory = Factory.create(locale)\n```\n\nThis happens when using dynamic mock data in local development where django sets the locale to \"en\" because we do not define territories.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\nfrom __future__ import absolute_import\nimport sys\nfrom faker import DEFAULT_LOCALE, DEFAULT_PROVIDERS, AVAILABLE_LOCALES\nfrom faker import Generator\nfrom faker import providers as providers_mod\n\n\nclass Factory(object):\n\n @classmethod\n def create(cls, locale=None, providers=None, generator=None, **config):\n\n # fix locale to package name\n locale = locale.replace('-', '_') if locale else DEFAULT_LOCALE\n if '_' in locale:\n locale = locale[:2] + locale[2:].upper()\n if locale not in AVAILABLE_LOCALES:\n raise AttributeError('Invalid configuration for faker locale \"{0}\"'.format(locale))\n\n providers = providers or DEFAULT_PROVIDERS\n\n faker = generator or Generator(**config)\n faker.add_provider(providers_mod.BaseProvider)\n for provider_name in providers:\n\n provider_class, lang_found = cls._get_provider_class(provider_name, locale)\n provider = provider_class(faker)\n provider.__provider__ = provider_name\n provider.__lang__ = lang_found\n faker.add_provider(provider)\n\n return faker\n\n @classmethod\n def _get_provider_class(cls, provider, locale=''):\n\n provider_class = cls._find_provider_class(provider, locale)\n\n if provider_class:\n return provider_class, locale\n\n if locale and locale != DEFAULT_LOCALE:\n # fallback to default locale\n provider_class = cls._find_provider_class(provider, DEFAULT_LOCALE)\n if provider_class:\n return provider_class, DEFAULT_LOCALE\n\n # fallback to no locale\n provider_class = cls._find_provider_class(provider)\n if provider_class:\n return provider_class, None\n\n raise ValueError('Unable to find provider \"{0}\" with locale \"{1}\"'.format(provider, locale))\n\n @classmethod\n def _find_provider_class(cls, provider, locale=''):\n\n path = \"{providers}{lang}.{provider}\".format(\n providers=providers_mod.__package__ or providers_mod.__name__,\n lang='.' 
+ locale if locale else '',\n provider=provider\n )\n\n try:\n __import__(path)\n except ImportError:\n return None\n\n return sys.modules[path].Provider\n", "path": "faker/factory.py"}], "after_files": [{"content": "from __future__ import unicode_literals\nfrom __future__ import absolute_import\nimport locale as pylocale\nimport sys\nfrom faker import DEFAULT_LOCALE, DEFAULT_PROVIDERS, AVAILABLE_LOCALES\nfrom faker import Generator\nfrom faker import providers as providers_mod\n\n\nclass Factory(object):\n\n @classmethod\n def create(cls, locale=None, providers=None, generator=None, **config):\n\n # fix locale to package name\n locale = locale.replace('-', '_') if locale else DEFAULT_LOCALE\n locale = pylocale.normalize(locale).split('.')[0]\n if locale not in AVAILABLE_LOCALES:\n raise AttributeError('Invalid configuration for faker locale \"{0}\"'.format(locale))\n\n providers = providers or DEFAULT_PROVIDERS\n\n faker = generator or Generator(**config)\n faker.add_provider(providers_mod.BaseProvider)\n for provider_name in providers:\n\n provider_class, lang_found = cls._get_provider_class(provider_name, locale)\n provider = provider_class(faker)\n provider.__provider__ = provider_name\n provider.__lang__ = lang_found\n faker.add_provider(provider)\n\n return faker\n\n @classmethod\n def _get_provider_class(cls, provider, locale=''):\n\n provider_class = cls._find_provider_class(provider, locale)\n\n if provider_class:\n return provider_class, locale\n\n if locale and locale != DEFAULT_LOCALE:\n # fallback to default locale\n provider_class = cls._find_provider_class(provider, DEFAULT_LOCALE)\n if provider_class:\n return provider_class, DEFAULT_LOCALE\n\n # fallback to no locale\n provider_class = cls._find_provider_class(provider)\n if provider_class:\n return provider_class, None\n\n raise ValueError('Unable to find provider \"{0}\" with locale \"{1}\"'.format(provider, locale))\n\n @classmethod\n def _find_provider_class(cls, provider, locale=''):\n\n path = \"{providers}{lang}.{provider}\".format(\n providers=providers_mod.__package__ or providers_mod.__name__,\n lang='.' + locale if locale else '',\n provider=provider\n )\n\n try:\n __import__(path)\n except ImportError:\n return None\n\n return sys.modules[path].Provider\n", "path": "faker/factory.py"}]}
1,002
186
gh_patches_debug_20593
rasdani/github-patches
git_diff
pyload__pyload-1385
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Feature Request: [UploadedNet] Detect Maintenance Mode Please update the UploadedNet plugin so it detects uploaded maintenance mode. Adding a timer to re-check indefinitely every 5 minutes or so might be necessary. Else all links in the queue are skipped as offline and have to be restarted manually. Here is the html code if Uploaded is in maintenance http://paste2.org/OaBy4vZ6 EDiT: A check for the head title "uploaded.net - Maintenance - Wartungsarbeiten" should suffice, I guess.. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `module/plugins/hoster/UploadedTo.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 import re 4 import time 5 6 from module.network.RequestFactory import getURL 7 from module.plugins.internal.CaptchaService import ReCaptcha 8 from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo 9 10 11 class UploadedTo(SimpleHoster): 12 __name__ = "UploadedTo" 13 __type__ = "hoster" 14 __version__ = "0.86" 15 16 __pattern__ = r'https?://(?:www\.)?(uploaded\.(to|net)|ul\.to)(/file/|/?\?id=|.*?&id=|/)(?P<ID>\w+)' 17 __config__ = [("use_premium", "bool", "Use premium account if available", True)] 18 19 __description__ = """Uploaded.net hoster plugin""" 20 __license__ = "GPLv3" 21 __authors__ = [("Walter Purcaro", "vuolter@gmail.com")] 22 23 24 DISPOSITION = False 25 26 API_KEY = "lhF2IeeprweDfu9ccWlxXVVypA5nA3EL" 27 28 URL_REPLACEMENTS = [(__pattern__ + ".*", r'http://uploaded.net/file/\g<ID>')] 29 30 LINK_PREMIUM_PATTERN = r'<div class="tfree".*\s*<form method="post" action="(.+?)"' 31 32 WAIT_PATTERN = r'Current waiting period: <span>(\d+)' 33 DL_LIMIT_ERROR = r'You have reached the max. 
number of possible free downloads for this hour' 34 35 36 @classmethod 37 def apiInfo(cls, url="", get={}, post={}): 38 info = super(UploadedTo, cls).apiInfo(url) 39 40 for _i in xrange(5): 41 html = getURL("http://uploaded.net/api/filemultiple", 42 get={"apikey": cls.API_KEY, 'id_0': re.match(cls.__pattern__, url).group('ID')}, 43 decode=True) 44 45 if html != "can't find request": 46 api = html.split(",", 4) 47 if api[0] == "online": 48 info.update({'name': api[4].strip(), 'size': api[2], 'status': 2}) 49 else: 50 info['status'] = 1 51 break 52 else: 53 time.sleep(3) 54 55 return info 56 57 58 def setup(self): 59 self.multiDL = self.resumeDownload = self.premium 60 self.chunkLimit = 1 # critical problems with more chunks 61 62 63 def checkErrors(self): 64 if 'var free_enabled = false;' in self.html: 65 self.logError(_("Free-download capacities exhausted")) 66 self.retry(24, 5 * 60) 67 68 elif "limit-size" in self.html: 69 self.fail(_("File too big for free download")) 70 71 elif "limit-slot" in self.html: # Temporary restriction so just wait a bit 72 self.wait(30 * 60, True) 73 self.retry() 74 75 elif "limit-parallel" in self.html: 76 self.fail(_("Cannot download in parallel")) 77 78 elif "limit-dl" in self.html or self.DL_LIMIT_ERROR in self.html: # limit-dl 79 self.wait(3 * 60 * 60, True) 80 self.retry() 81 82 elif '"err":"captcha"' in self.html: 83 self.invalidCaptcha() 84 85 else: 86 m = re.search(self.WAIT_PATTERN, self.html) 87 if m: 88 self.wait(m.group(1)) 89 90 91 def handleFree(self, pyfile): 92 self.load("http://uploaded.net/language/en", just_header=True) 93 94 self.html = self.load("http://uploaded.net/js/download.js", decode=True) 95 96 recaptcha = ReCaptcha(self) 97 response, challenge = recaptcha.challenge() 98 99 self.html = self.load("http://uploaded.net/io/ticket/captcha/%s" % self.info['pattern']['ID'], 100 post={'recaptcha_challenge_field': challenge, 101 'recaptcha_response_field' : response}) 102 103 if "type:'download'" in self.html: 104 self.correctCaptcha() 105 try: 106 self.link = re.search("url:'(.+?)'", self.html).group(1) 107 108 except Exception: 109 pass 110 111 self.checkErrors() 112 113 114 def checkFile(self, rules={}): 115 if self.checkDownload({'limit-dl': self.DL_LIMIT_ERROR}): 116 self.wait(3 * 60 * 60, True) 117 self.retry() 118 119 return super(UploadedTo, self).checkFile(rules) 120 121 122 getInfo = create_getInfo(UploadedTo) 123 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/module/plugins/hoster/UploadedTo.py b/module/plugins/hoster/UploadedTo.py --- a/module/plugins/hoster/UploadedTo.py +++ b/module/plugins/hoster/UploadedTo.py @@ -11,7 +11,7 @@ class UploadedTo(SimpleHoster): __name__ = "UploadedTo" __type__ = "hoster" - __version__ = "0.86" + __version__ = "0.87" __pattern__ = r'https?://(?:www\.)?(uploaded\.(to|net)|ul\.to)(/file/|/?\?id=|.*?&id=|/)(?P<ID>\w+)' __config__ = [("use_premium", "bool", "Use premium account if available", True)] @@ -27,6 +27,8 @@ URL_REPLACEMENTS = [(__pattern__ + ".*", r'http://uploaded.net/file/\g<ID>')] + TEMP_OFFLINE_PATTERN = r'<title>uploaded\.net - Maintenance - Wartungsarbeiten</title>' + LINK_PREMIUM_PATTERN = r'<div class="tfree".*\s*<form method="post" action="(.+?)"' WAIT_PATTERN = r'Current waiting period: <span>(\d+)'
{"golden_diff": "diff --git a/module/plugins/hoster/UploadedTo.py b/module/plugins/hoster/UploadedTo.py\n--- a/module/plugins/hoster/UploadedTo.py\n+++ b/module/plugins/hoster/UploadedTo.py\n@@ -11,7 +11,7 @@\n class UploadedTo(SimpleHoster):\n __name__ = \"UploadedTo\"\n __type__ = \"hoster\"\n- __version__ = \"0.86\"\n+ __version__ = \"0.87\"\n \n __pattern__ = r'https?://(?:www\\.)?(uploaded\\.(to|net)|ul\\.to)(/file/|/?\\?id=|.*?&id=|/)(?P<ID>\\w+)'\n __config__ = [(\"use_premium\", \"bool\", \"Use premium account if available\", True)]\n@@ -27,6 +27,8 @@\n \n URL_REPLACEMENTS = [(__pattern__ + \".*\", r'http://uploaded.net/file/\\g<ID>')]\n \n+ TEMP_OFFLINE_PATTERN = r'<title>uploaded\\.net - Maintenance - Wartungsarbeiten</title>'\n+\n LINK_PREMIUM_PATTERN = r'<div class=\"tfree\".*\\s*<form method=\"post\" action=\"(.+?)\"'\n \n WAIT_PATTERN = r'Current waiting period: <span>(\\d+)'\n", "issue": "Feature Request: [UploadedNet] Detect Maintenance Mode\nPlease update the UploadedNet plugin so it detects uploaded maintenance mode.\n\nAdding a timer to re-check indefinitely every 5 minutes or so might be necessary.\n\nElse all links in the queue are skipped as offline and have to be restarted manually.\n\nHere is the html code if Uploaded is in maintenance\n\nhttp://paste2.org/OaBy4vZ6\n\nEDiT: A check for the head title \"uploaded.net - Maintenance - Wartungsarbeiten\" should suffice, I guess..\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport re\nimport time\n\nfrom module.network.RequestFactory import getURL\nfrom module.plugins.internal.CaptchaService import ReCaptcha\nfrom module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo\n\n\nclass UploadedTo(SimpleHoster):\n __name__ = \"UploadedTo\"\n __type__ = \"hoster\"\n __version__ = \"0.86\"\n\n __pattern__ = r'https?://(?:www\\.)?(uploaded\\.(to|net)|ul\\.to)(/file/|/?\\?id=|.*?&id=|/)(?P<ID>\\w+)'\n __config__ = [(\"use_premium\", \"bool\", \"Use premium account if available\", True)]\n\n __description__ = \"\"\"Uploaded.net hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Walter Purcaro\", \"vuolter@gmail.com\")]\n\n\n DISPOSITION = False\n\n API_KEY = \"lhF2IeeprweDfu9ccWlxXVVypA5nA3EL\"\n\n URL_REPLACEMENTS = [(__pattern__ + \".*\", r'http://uploaded.net/file/\\g<ID>')]\n\n LINK_PREMIUM_PATTERN = r'<div class=\"tfree\".*\\s*<form method=\"post\" action=\"(.+?)\"'\n\n WAIT_PATTERN = r'Current waiting period: <span>(\\d+)'\n DL_LIMIT_ERROR = r'You have reached the max. 
number of possible free downloads for this hour'\n\n\n @classmethod\n def apiInfo(cls, url=\"\", get={}, post={}):\n info = super(UploadedTo, cls).apiInfo(url)\n\n for _i in xrange(5):\n html = getURL(\"http://uploaded.net/api/filemultiple\",\n get={\"apikey\": cls.API_KEY, 'id_0': re.match(cls.__pattern__, url).group('ID')},\n decode=True)\n\n if html != \"can't find request\":\n api = html.split(\",\", 4)\n if api[0] == \"online\":\n info.update({'name': api[4].strip(), 'size': api[2], 'status': 2})\n else:\n info['status'] = 1\n break\n else:\n time.sleep(3)\n\n return info\n\n\n def setup(self):\n self.multiDL = self.resumeDownload = self.premium\n self.chunkLimit = 1 # critical problems with more chunks\n\n\n def checkErrors(self):\n if 'var free_enabled = false;' in self.html:\n self.logError(_(\"Free-download capacities exhausted\"))\n self.retry(24, 5 * 60)\n\n elif \"limit-size\" in self.html:\n self.fail(_(\"File too big for free download\"))\n\n elif \"limit-slot\" in self.html: # Temporary restriction so just wait a bit\n self.wait(30 * 60, True)\n self.retry()\n\n elif \"limit-parallel\" in self.html:\n self.fail(_(\"Cannot download in parallel\"))\n\n elif \"limit-dl\" in self.html or self.DL_LIMIT_ERROR in self.html: # limit-dl\n self.wait(3 * 60 * 60, True)\n self.retry()\n\n elif '\"err\":\"captcha\"' in self.html:\n self.invalidCaptcha()\n\n else:\n m = re.search(self.WAIT_PATTERN, self.html)\n if m:\n self.wait(m.group(1))\n\n\n def handleFree(self, pyfile):\n self.load(\"http://uploaded.net/language/en\", just_header=True)\n\n self.html = self.load(\"http://uploaded.net/js/download.js\", decode=True)\n\n recaptcha = ReCaptcha(self)\n response, challenge = recaptcha.challenge()\n\n self.html = self.load(\"http://uploaded.net/io/ticket/captcha/%s\" % self.info['pattern']['ID'],\n post={'recaptcha_challenge_field': challenge,\n 'recaptcha_response_field' : response})\n\n if \"type:'download'\" in self.html:\n self.correctCaptcha()\n try:\n self.link = re.search(\"url:'(.+?)'\", self.html).group(1)\n\n except Exception:\n pass\n\n self.checkErrors()\n\n\n def checkFile(self, rules={}):\n if self.checkDownload({'limit-dl': self.DL_LIMIT_ERROR}):\n self.wait(3 * 60 * 60, True)\n self.retry()\n\n return super(UploadedTo, self).checkFile(rules)\n\n\ngetInfo = create_getInfo(UploadedTo)\n", "path": "module/plugins/hoster/UploadedTo.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport re\nimport time\n\nfrom module.network.RequestFactory import getURL\nfrom module.plugins.internal.CaptchaService import ReCaptcha\nfrom module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo\n\n\nclass UploadedTo(SimpleHoster):\n __name__ = \"UploadedTo\"\n __type__ = \"hoster\"\n __version__ = \"0.87\"\n\n __pattern__ = r'https?://(?:www\\.)?(uploaded\\.(to|net)|ul\\.to)(/file/|/?\\?id=|.*?&id=|/)(?P<ID>\\w+)'\n __config__ = [(\"use_premium\", \"bool\", \"Use premium account if available\", True)]\n\n __description__ = \"\"\"Uploaded.net hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Walter Purcaro\", \"vuolter@gmail.com\")]\n\n\n DISPOSITION = False\n\n API_KEY = \"lhF2IeeprweDfu9ccWlxXVVypA5nA3EL\"\n\n URL_REPLACEMENTS = [(__pattern__ + \".*\", r'http://uploaded.net/file/\\g<ID>')]\n\n TEMP_OFFLINE_PATTERN = r'<title>uploaded\\.net - Maintenance - Wartungsarbeiten</title>'\n\n LINK_PREMIUM_PATTERN = r'<div class=\"tfree\".*\\s*<form method=\"post\" action=\"(.+?)\"'\n\n WAIT_PATTERN = r'Current waiting period: <span>(\\d+)'\n DL_LIMIT_ERROR = 
r'You have reached the max. number of possible free downloads for this hour'\n\n\n @classmethod\n def apiInfo(cls, url=\"\", get={}, post={}):\n info = super(UploadedTo, cls).apiInfo(url)\n\n for _i in xrange(5):\n html = getURL(\"http://uploaded.net/api/filemultiple\",\n get={\"apikey\": cls.API_KEY, 'id_0': re.match(cls.__pattern__, url).group('ID')},\n decode=True)\n\n if html != \"can't find request\":\n api = html.split(\",\", 4)\n if api[0] == \"online\":\n info.update({'name': api[4].strip(), 'size': api[2], 'status': 2})\n else:\n info['status'] = 1\n break\n else:\n time.sleep(3)\n\n return info\n\n\n def setup(self):\n self.multiDL = self.resumeDownload = self.premium\n self.chunkLimit = 1 # critical problems with more chunks\n\n\n def checkErrors(self):\n if 'var free_enabled = false;' in self.html:\n self.logError(_(\"Free-download capacities exhausted\"))\n self.retry(24, 5 * 60)\n\n elif \"limit-size\" in self.html:\n self.fail(_(\"File too big for free download\"))\n\n elif \"limit-slot\" in self.html: # Temporary restriction so just wait a bit\n self.wait(30 * 60, True)\n self.retry()\n\n elif \"limit-parallel\" in self.html:\n self.fail(_(\"Cannot download in parallel\"))\n\n elif \"limit-dl\" in self.html or self.DL_LIMIT_ERROR in self.html: # limit-dl\n self.wait(3 * 60 * 60, True)\n self.retry()\n\n elif '\"err\":\"captcha\"' in self.html:\n self.invalidCaptcha()\n\n else:\n m = re.search(self.WAIT_PATTERN, self.html)\n if m:\n self.wait(m.group(1))\n\n\n def handleFree(self, pyfile):\n self.load(\"http://uploaded.net/language/en\", just_header=True)\n\n self.html = self.load(\"http://uploaded.net/js/download.js\", decode=True)\n\n recaptcha = ReCaptcha(self)\n response, challenge = recaptcha.challenge()\n\n self.html = self.load(\"http://uploaded.net/io/ticket/captcha/%s\" % self.info['pattern']['ID'],\n post={'recaptcha_challenge_field': challenge,\n 'recaptcha_response_field' : response})\n\n if \"type:'download'\" in self.html:\n self.correctCaptcha()\n try:\n self.link = re.search(\"url:'(.+?)'\", self.html).group(1)\n\n except Exception:\n pass\n\n self.checkErrors()\n\n\n def checkFile(self, rules={}):\n if self.checkDownload({'limit-dl': self.DL_LIMIT_ERROR}):\n self.wait(3 * 60 * 60, True)\n self.retry()\n\n return super(UploadedTo, self).checkFile(rules)\n\n\ngetInfo = create_getInfo(UploadedTo)\n", "path": "module/plugins/hoster/UploadedTo.py"}]}
1,651
298
gh_patches_debug_9143
rasdani/github-patches
git_diff
google-deepmind__dm-haiku-168
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Examples are distributed as part of the package
Hi,

I think the examples are folder are currently packaged as part of the package on PyPI. This means that installing haiku will also install the examples as the package `examples`. Should these be excluded from the distribution?

JAX also has examples in their repo, but those are excluded from packaging in

https://github.com/google/jax/blob/main/setup.py#L33

--- END ISSUE ---

Below are some code segments, each from a relevant file. One or more of these files may contain bugs.

--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Setup for pip package."""
16 
17 from setuptools import find_namespace_packages
18 from setuptools import setup
19 
20 
21 def _get_version():
22 with open('haiku/__init__.py') as fp:
23 for line in fp:
24 if line.startswith('__version__'):
25 g = {}
26 exec(line, g) # pylint: disable=exec-used
27 return g['__version__']
28 raise ValueError('`__version__` not defined in `haiku/__init__.py`')
29 
30 
31 def _parse_requirements(requirements_txt_path):
32 with open(requirements_txt_path) as fp:
33 return fp.read().splitlines()
34 
35 
36 _VERSION = _get_version()
37 
38 EXTRA_PACKAGES = {
39 'jax': ['jax>=0.1.71'],
40 'jaxlib': ['jaxlib>=0.1.49'],
41 }
42 
43 setup(
44 name='dm-haiku',
45 version=_VERSION,
46 url='https://github.com/deepmind/dm-haiku',
47 license='Apache 2.0',
48 author='DeepMind',
49 description='Haiku is a library for building neural networks in JAX.',
50 long_description=open('README.md').read(),
51 long_description_content_type='text/markdown',
52 author_email='haiku-dev-os@google.com',
53 # Contained modules and scripts.
54 packages=find_namespace_packages(exclude=['*_test.py']),
55 install_requires=_parse_requirements('requirements.txt'),
56 extras_require=EXTRA_PACKAGES,
57 tests_require=_parse_requirements('requirements-test.txt'),
58 requires_python='>=3.7',
59 include_package_data=True,
60 zip_safe=False,
61 # PyPI package information.
62 classifiers=[
63 'Development Status :: 4 - Beta',
64 'Intended Audience :: Developers',
65 'Intended Audience :: Education',
66 'Intended Audience :: Science/Research',
67 'License :: OSI Approved :: Apache Software License',
68 'Programming Language :: Python :: 3',
69 'Programming Language :: Python :: 3.7',
70 'Programming Language :: Python :: 3.8',
71 'Topic :: Scientific/Engineering :: Mathematics',
72 'Topic :: Software Development :: Libraries :: Python Modules',
73 'Topic :: Software Development :: Libraries',
74 ],
75 )
76 ```
--- END FILES ---

Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example:

```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
 if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
 if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
 server.shutdown()
```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -51,7 +51,7 @@ long_description_content_type='text/markdown', author_email='haiku-dev-os@google.com', # Contained modules and scripts. - packages=find_namespace_packages(exclude=['*_test.py']), + packages=find_namespace_packages(exclude=['*_test.py', 'examples']), install_requires=_parse_requirements('requirements.txt'), extras_require=EXTRA_PACKAGES, tests_require=_parse_requirements('requirements-test.txt'),
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -51,7 +51,7 @@\n long_description_content_type='text/markdown',\n author_email='haiku-dev-os@google.com',\n # Contained modules and scripts.\n- packages=find_namespace_packages(exclude=['*_test.py']),\n+ packages=find_namespace_packages(exclude=['*_test.py', 'examples']),\n install_requires=_parse_requirements('requirements.txt'),\n extras_require=EXTRA_PACKAGES,\n tests_require=_parse_requirements('requirements-test.txt'),\n", "issue": "Examples are distributed as part of the package\nHi, \r\n\r\nI think the examples are folder are currently packaged as part of the package on PyPI. This means that installing haiku will also install the examples as the package `examples`. Should these be excluded from the distribution?\r\n\r\nJAX also has examples in their repo, but those are excluded from packaging in \r\n\r\nhttps://github.com/google/jax/blob/main/setup.py#L33\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Setup for pip package.\"\"\"\n\nfrom setuptools import find_namespace_packages\nfrom setuptools import setup\n\n\ndef _get_version():\n with open('haiku/__init__.py') as fp:\n for line in fp:\n if line.startswith('__version__'):\n g = {}\n exec(line, g) # pylint: disable=exec-used\n return g['__version__']\n raise ValueError('`__version__` not defined in `haiku/__init__.py`')\n\n\ndef _parse_requirements(requirements_txt_path):\n with open(requirements_txt_path) as fp:\n return fp.read().splitlines()\n\n\n_VERSION = _get_version()\n\nEXTRA_PACKAGES = {\n 'jax': ['jax>=0.1.71'],\n 'jaxlib': ['jaxlib>=0.1.49'],\n}\n\nsetup(\n name='dm-haiku',\n version=_VERSION,\n url='https://github.com/deepmind/dm-haiku',\n license='Apache 2.0',\n author='DeepMind',\n description='Haiku is a library for building neural networks in JAX.',\n long_description=open('README.md').read(),\n long_description_content_type='text/markdown',\n author_email='haiku-dev-os@google.com',\n # Contained modules and scripts.\n packages=find_namespace_packages(exclude=['*_test.py']),\n install_requires=_parse_requirements('requirements.txt'),\n extras_require=EXTRA_PACKAGES,\n tests_require=_parse_requirements('requirements-test.txt'),\n requires_python='>=3.7',\n include_package_data=True,\n zip_safe=False,\n # PyPI package information.\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: 
Libraries',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Setup for pip package.\"\"\"\n\nfrom setuptools import find_namespace_packages\nfrom setuptools import setup\n\n\ndef _get_version():\n with open('haiku/__init__.py') as fp:\n for line in fp:\n if line.startswith('__version__'):\n g = {}\n exec(line, g) # pylint: disable=exec-used\n return g['__version__']\n raise ValueError('`__version__` not defined in `haiku/__init__.py`')\n\n\ndef _parse_requirements(requirements_txt_path):\n with open(requirements_txt_path) as fp:\n return fp.read().splitlines()\n\n\n_VERSION = _get_version()\n\nEXTRA_PACKAGES = {\n 'jax': ['jax>=0.1.71'],\n 'jaxlib': ['jaxlib>=0.1.49'],\n}\n\nsetup(\n name='dm-haiku',\n version=_VERSION,\n url='https://github.com/deepmind/dm-haiku',\n license='Apache 2.0',\n author='DeepMind',\n description='Haiku is a library for building neural networks in JAX.',\n long_description=open('README.md').read(),\n long_description_content_type='text/markdown',\n author_email='haiku-dev-os@google.com',\n # Contained modules and scripts.\n packages=find_namespace_packages(exclude=['*_test.py', 'examples']),\n install_requires=_parse_requirements('requirements.txt'),\n extras_require=EXTRA_PACKAGES,\n tests_require=_parse_requirements('requirements-test.txt'),\n requires_python='>=3.7',\n include_package_data=True,\n zip_safe=False,\n # PyPI package information.\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py"}]}
1,096
121
gh_patches_debug_2119
rasdani/github-patches
git_diff
qtile__qtile-1578
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- qtile error to load --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libqtile/scripts/qtile.py` Content: ``` 1 # Copyright (c) 2008, Aldo Cortesi. All rights reserved. 2 # Copyright (c) 2011, Florian Mounier 3 # 4 # Permission is hereby granted, free of charge, to any person obtaining a copy 5 # of this software and associated documentation files (the "Software"), to deal 6 # in the Software without restriction, including without limitation the rights 7 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 8 # copies of the Software, and to permit persons to whom the Software is 9 # furnished to do so, subject to the following conditions: 10 # 11 # The above copyright notice and this permission notice shall be included in 12 # all copies or substantial portions of the Software. 13 # 14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 17 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 19 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 20 # SOFTWARE. 21 22 # Set the locale before any widgets or anything are imported, so any widget 23 # whose defaults depend on a reasonable locale sees something reasonable. 24 import locale 25 import logging 26 from os import path, getenv, makedirs 27 28 from libqtile.log_utils import init_log, logger 29 from libqtile import confreader 30 from libqtile.backend.x11 import xcore 31 32 locale.setlocale(locale.LC_ALL, locale.getdefaultlocale()) # type: ignore 33 34 try: 35 import pkg_resources 36 VERSION = pkg_resources.require("qtile")[0].version 37 except (pkg_resources.DistributionNotFound, ImportError): 38 VERSION = 'dev' 39 40 41 def rename_process(): 42 """ 43 Try to rename the qtile process if py-setproctitle is installed: 44 45 http://code.google.com/p/py-setproctitle/ 46 47 Will fail silently if it's not installed. Setting the title lets you do 48 stuff like "killall qtile". 49 """ 50 try: 51 import setproctitle 52 setproctitle.setproctitle("qtile") 53 except ImportError: 54 pass 55 56 57 def make_qtile(): 58 from argparse import ArgumentParser 59 parser = ArgumentParser( 60 description='A full-featured, pure-Python tiling window manager.', 61 prog='qtile', 62 ) 63 parser.add_argument( 64 '--version', 65 action='version', 66 version=VERSION, 67 ) 68 parser.add_argument( 69 "-c", "--config", 70 action="store", 71 default=path.expanduser(path.join( 72 getenv('XDG_CONFIG_HOME', '~/.config'), 'qtile', 'config.py')), 73 dest="configfile", 74 help='Use the specified configuration file', 75 ) 76 parser.add_argument( 77 "-s", "--socket", 78 action="store", 79 default=None, 80 dest="socket", 81 help='Path of the Qtile IPC socket.' 82 ) 83 parser.add_argument( 84 "-n", "--no-spawn", 85 action="store_true", 86 default=False, 87 dest="no_spawn", 88 help='Avoid spawning apps. 
(Used for restart)' 89 ) 90 parser.add_argument( 91 '-l', '--log-level', 92 default='WARNING', 93 dest='log_level', 94 choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'), 95 help='Set qtile log level' 96 ) 97 parser.add_argument( 98 '--with-state', 99 default=None, 100 dest='state', 101 help='Pickled QtileState object (typically used only internally)', 102 ) 103 options = parser.parse_args() 104 log_level = getattr(logging, options.log_level) 105 init_log(log_level=log_level) 106 107 kore = xcore.XCore() 108 try: 109 if not path.isfile(options.configfile): 110 try: 111 makedirs(path.dirname(options.configfile), exist_ok=True) 112 from shutil import copyfile 113 default_config_path = path.join(path.dirname(__file__), 114 "..", 115 "resources", 116 "default_config.py") 117 copyfile(default_config_path, options.configfile) 118 logger.info('Copied default_config.py to %s', options.configfile) 119 except Exception as e: 120 logger.exception('Failed to copy default_config.py to %s: (%s)', 121 options.configfile, e) 122 123 config = confreader.Config.from_file(kore, options.configfile) 124 except Exception as e: 125 logger.exception('Error while reading config file (%s)', e) 126 config = confreader.Config() 127 from libqtile.widget import TextBox 128 widgets = config.screens[0].bottom.widgets 129 widgets.insert(0, TextBox('Config Err!')) 130 131 # XXX: the import is here because we need to call init_log 132 # before start importing stuff 133 from libqtile.core import session_manager 134 return session_manager.SessionManager( 135 kore, 136 config, 137 fname=options.socket, 138 no_spawn=options.no_spawn, 139 state=options.state, 140 ) 141 142 143 def main(): 144 rename_process() 145 q = make_qtile() 146 try: 147 q.loop() 148 except Exception: 149 logger.exception('Qtile crashed') 150 logger.info('Exiting...') 151 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libqtile/scripts/qtile.py b/libqtile/scripts/qtile.py --- a/libqtile/scripts/qtile.py +++ b/libqtile/scripts/qtile.py @@ -29,7 +29,11 @@ from libqtile import confreader from libqtile.backend.x11 import xcore -locale.setlocale(locale.LC_ALL, locale.getdefaultlocale()) # type: ignore +try: + locale.setlocale(locale.LC_ALL, locale.getdefaultlocale()) # type: ignore +except locale.Error: + pass + try: import pkg_resources
{"golden_diff": "diff --git a/libqtile/scripts/qtile.py b/libqtile/scripts/qtile.py\n--- a/libqtile/scripts/qtile.py\n+++ b/libqtile/scripts/qtile.py\n@@ -29,7 +29,11 @@\n from libqtile import confreader\n from libqtile.backend.x11 import xcore\n \n-locale.setlocale(locale.LC_ALL, locale.getdefaultlocale()) # type: ignore\n+try:\n+ locale.setlocale(locale.LC_ALL, locale.getdefaultlocale()) # type: ignore\n+except locale.Error:\n+ pass\n+\n \n try:\n import pkg_resources\n", "issue": "qtile error to load\n\n", "before_files": [{"content": "# Copyright (c) 2008, Aldo Cortesi. All rights reserved.\n# Copyright (c) 2011, Florian Mounier\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# Set the locale before any widgets or anything are imported, so any widget\n# whose defaults depend on a reasonable locale sees something reasonable.\nimport locale\nimport logging\nfrom os import path, getenv, makedirs\n\nfrom libqtile.log_utils import init_log, logger\nfrom libqtile import confreader\nfrom libqtile.backend.x11 import xcore\n\nlocale.setlocale(locale.LC_ALL, locale.getdefaultlocale()) # type: ignore\n\ntry:\n import pkg_resources\n VERSION = pkg_resources.require(\"qtile\")[0].version\nexcept (pkg_resources.DistributionNotFound, ImportError):\n VERSION = 'dev'\n\n\ndef rename_process():\n \"\"\"\n Try to rename the qtile process if py-setproctitle is installed:\n\n http://code.google.com/p/py-setproctitle/\n\n Will fail silently if it's not installed. Setting the title lets you do\n stuff like \"killall qtile\".\n \"\"\"\n try:\n import setproctitle\n setproctitle.setproctitle(\"qtile\")\n except ImportError:\n pass\n\n\ndef make_qtile():\n from argparse import ArgumentParser\n parser = ArgumentParser(\n description='A full-featured, pure-Python tiling window manager.',\n prog='qtile',\n )\n parser.add_argument(\n '--version',\n action='version',\n version=VERSION,\n )\n parser.add_argument(\n \"-c\", \"--config\",\n action=\"store\",\n default=path.expanduser(path.join(\n getenv('XDG_CONFIG_HOME', '~/.config'), 'qtile', 'config.py')),\n dest=\"configfile\",\n help='Use the specified configuration file',\n )\n parser.add_argument(\n \"-s\", \"--socket\",\n action=\"store\",\n default=None,\n dest=\"socket\",\n help='Path of the Qtile IPC socket.'\n )\n parser.add_argument(\n \"-n\", \"--no-spawn\",\n action=\"store_true\",\n default=False,\n dest=\"no_spawn\",\n help='Avoid spawning apps. 
(Used for restart)'\n )\n parser.add_argument(\n '-l', '--log-level',\n default='WARNING',\n dest='log_level',\n choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),\n help='Set qtile log level'\n )\n parser.add_argument(\n '--with-state',\n default=None,\n dest='state',\n help='Pickled QtileState object (typically used only internally)',\n )\n options = parser.parse_args()\n log_level = getattr(logging, options.log_level)\n init_log(log_level=log_level)\n\n kore = xcore.XCore()\n try:\n if not path.isfile(options.configfile):\n try:\n makedirs(path.dirname(options.configfile), exist_ok=True)\n from shutil import copyfile\n default_config_path = path.join(path.dirname(__file__),\n \"..\",\n \"resources\",\n \"default_config.py\")\n copyfile(default_config_path, options.configfile)\n logger.info('Copied default_config.py to %s', options.configfile)\n except Exception as e:\n logger.exception('Failed to copy default_config.py to %s: (%s)',\n options.configfile, e)\n\n config = confreader.Config.from_file(kore, options.configfile)\n except Exception as e:\n logger.exception('Error while reading config file (%s)', e)\n config = confreader.Config()\n from libqtile.widget import TextBox\n widgets = config.screens[0].bottom.widgets\n widgets.insert(0, TextBox('Config Err!'))\n\n # XXX: the import is here because we need to call init_log\n # before start importing stuff\n from libqtile.core import session_manager\n return session_manager.SessionManager(\n kore,\n config,\n fname=options.socket,\n no_spawn=options.no_spawn,\n state=options.state,\n )\n\n\ndef main():\n rename_process()\n q = make_qtile()\n try:\n q.loop()\n except Exception:\n logger.exception('Qtile crashed')\n logger.info('Exiting...')\n", "path": "libqtile/scripts/qtile.py"}], "after_files": [{"content": "# Copyright (c) 2008, Aldo Cortesi. All rights reserved.\n# Copyright (c) 2011, Florian Mounier\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# Set the locale before any widgets or anything are imported, so any widget\n# whose defaults depend on a reasonable locale sees something reasonable.\nimport locale\nimport logging\nfrom os import path, getenv, makedirs\n\nfrom libqtile.log_utils import init_log, logger\nfrom libqtile import confreader\nfrom libqtile.backend.x11 import xcore\n\ntry:\n locale.setlocale(locale.LC_ALL, locale.getdefaultlocale()) # type: ignore\nexcept locale.Error:\n pass\n\n\ntry:\n import pkg_resources\n VERSION = pkg_resources.require(\"qtile\")[0].version\nexcept (pkg_resources.DistributionNotFound, ImportError):\n VERSION = 'dev'\n\n\ndef rename_process():\n \"\"\"\n Try to rename the qtile process if py-setproctitle is installed:\n\n http://code.google.com/p/py-setproctitle/\n\n Will fail silently if it's not installed. Setting the title lets you do\n stuff like \"killall qtile\".\n \"\"\"\n try:\n import setproctitle\n setproctitle.setproctitle(\"qtile\")\n except ImportError:\n pass\n\n\ndef make_qtile():\n from argparse import ArgumentParser\n parser = ArgumentParser(\n description='A full-featured, pure-Python tiling window manager.',\n prog='qtile',\n )\n parser.add_argument(\n '--version',\n action='version',\n version=VERSION,\n )\n parser.add_argument(\n \"-c\", \"--config\",\n action=\"store\",\n default=path.expanduser(path.join(\n getenv('XDG_CONFIG_HOME', '~/.config'), 'qtile', 'config.py')),\n dest=\"configfile\",\n help='Use the specified configuration file',\n )\n parser.add_argument(\n \"-s\", \"--socket\",\n action=\"store\",\n default=None,\n dest=\"socket\",\n help='Path of the Qtile IPC socket.'\n )\n parser.add_argument(\n \"-n\", \"--no-spawn\",\n action=\"store_true\",\n default=False,\n dest=\"no_spawn\",\n help='Avoid spawning apps. 
(Used for restart)'\n )\n parser.add_argument(\n '-l', '--log-level',\n default='WARNING',\n dest='log_level',\n choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),\n help='Set qtile log level'\n )\n parser.add_argument(\n '--with-state',\n default=None,\n dest='state',\n help='Pickled QtileState object (typically used only internally)',\n )\n options = parser.parse_args()\n log_level = getattr(logging, options.log_level)\n init_log(log_level=log_level)\n\n kore = xcore.XCore()\n try:\n if not path.isfile(options.configfile):\n try:\n makedirs(path.dirname(options.configfile), exist_ok=True)\n from shutil import copyfile\n default_config_path = path.join(path.dirname(__file__),\n \"..\",\n \"resources\",\n \"default_config.py\")\n copyfile(default_config_path, options.configfile)\n logger.info('Copied default_config.py to %s', options.configfile)\n except Exception as e:\n logger.exception('Failed to copy default_config.py to %s: (%s)',\n options.configfile, e)\n\n config = confreader.Config.from_file(kore, options.configfile)\n except Exception as e:\n logger.exception('Error while reading config file (%s)', e)\n config = confreader.Config()\n from libqtile.widget import TextBox\n widgets = config.screens[0].bottom.widgets\n widgets.insert(0, TextBox('Config Err!'))\n\n # XXX: the import is here because we need to call init_log\n # before start importing stuff\n from libqtile.core import session_manager\n return session_manager.SessionManager(\n kore,\n config,\n fname=options.socket,\n no_spawn=options.no_spawn,\n state=options.state,\n )\n\n\ndef main():\n rename_process()\n q = make_qtile()\n try:\n q.loop()\n except Exception:\n logger.exception('Qtile crashed')\n logger.info('Exiting...')\n", "path": "libqtile/scripts/qtile.py"}]}
1,740
135
gh_patches_debug_11280
rasdani/github-patches
git_diff
scverse__scanpy-1856
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Increase visibility of ecosystem page As discussed at last meeting - [ ] Document process for adding entries (note this on top of ecosystem page) - [ ] Link from external - [ ] Clarify goals/ differences b/w ecosystem and external --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scanpy/external/__init__.py` Content: ``` 1 from . import tl 2 from . import pl 3 from . import pp 4 from . import exporting 5 6 import sys 7 from .. import _utils 8 9 _utils.annotate_doc_types(sys.modules[__name__], 'scanpy') 10 del sys, _utils 11 12 13 __doc__ = """\ 14 External API 15 ============ 16 17 18 Import Scanpy's wrappers to external tools as:: 19 20 import scanpy.external as sce 21 22 If you'd like to see your tool included here, please open a `pull request <https://github.com/theislab/scanpy>`_! 23 24 Preprocessing: PP 25 ------------------ 26 27 Data integration 28 ~~~~~~~~~~~~~~~~ 29 30 .. autosummary:: 31 :toctree: . 32 33 pp.bbknn 34 pp.harmony_integrate 35 pp.mnn_correct 36 pp.scanorama_integrate 37 38 39 Sample demultiplexing, Doublet detection 40 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 41 42 .. autosummary:: 43 :toctree: . 44 45 pp.scrublet 46 pp.scrublet_simulate_doublets 47 pl.scrublet_score_distribution 48 pp.hashsolo 49 50 Imputation 51 ~~~~~~~~~~ 52 53 Note that the fundamental limitations of imputation are still under `debate 54 <https://github.com/theislab/scanpy/issues/189>`__. 55 56 .. autosummary:: 57 :toctree: . 58 59 pp.dca 60 pp.magic 61 62 63 Tools: TL 64 ---------- 65 66 Embeddings 67 ~~~~~~~~~~ 68 69 .. autosummary:: 70 :toctree: . 71 72 tl.phate 73 tl.palantir 74 tl.trimap 75 tl.sam 76 77 Clustering and trajectory inference 78 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 79 80 .. autosummary:: 81 :toctree: . 82 83 tl.phenograph 84 tl.harmony_timeseries 85 tl.wishbone 86 tl.palantir 87 tl.palantir_results 88 89 Gene scores, Cell cycle 90 ~~~~~~~~~~~~~~~~~~~~~~~ 91 92 .. autosummary:: 93 :toctree: . 94 95 tl.sandbag 96 tl.cyclone 97 98 99 Plotting: PL 100 ------------ 101 102 .. autosummary:: 103 :toctree: . 104 105 pl.phate 106 pl.trimap 107 pl.sam 108 pl.wishbone_marker_trajectory 109 110 Exporting 111 --------- 112 113 .. autosummary:: 114 :toctree: . 115 116 exporting.spring_project 117 exporting.cellbrowser 118 """ 119 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scanpy/external/__init__.py b/scanpy/external/__init__.py --- a/scanpy/external/__init__.py +++ b/scanpy/external/__init__.py @@ -14,12 +14,15 @@ External API ============ +.. note:: + More tools that integrate well with scanpy and anndata can be found on the :doc:`ecosystem page <../ecosystem>`. Import Scanpy's wrappers to external tools as:: import scanpy.external as sce -If you'd like to see your tool included here, please open a `pull request <https://github.com/theislab/scanpy>`_! +If you'd like to include a tool here, consider making a pull request (:doc:`instructions <../dev/external-tools>`). +If the tool already uses `scanpy` or `anndata`, it may fit better in the :doc:`ecosystem page <../ecosystem>`. Preprocessing: PP ------------------
{"golden_diff": "diff --git a/scanpy/external/__init__.py b/scanpy/external/__init__.py\n--- a/scanpy/external/__init__.py\n+++ b/scanpy/external/__init__.py\n@@ -14,12 +14,15 @@\n External API\n ============\n \n+.. note::\n+ More tools that integrate well with scanpy and anndata can be found on the :doc:`ecosystem page <../ecosystem>`.\n \n Import Scanpy's wrappers to external tools as::\n \n import scanpy.external as sce\n \n-If you'd like to see your tool included here, please open a `pull request <https://github.com/theislab/scanpy>`_!\n+If you'd like to include a tool here, consider making a pull request (:doc:`instructions <../dev/external-tools>`).\n+If the tool already uses `scanpy` or `anndata`, it may fit better in the :doc:`ecosystem page <../ecosystem>`.\n \n Preprocessing: PP\n ------------------\n", "issue": "Increase visibility of ecosystem page\nAs discussed at last meeting\r\n\r\n- [ ] Document process for adding entries (note this on top of ecosystem page)\r\n- [ ] Link from external\r\n- [ ] Clarify goals/ differences b/w ecosystem and external\n", "before_files": [{"content": "from . import tl\nfrom . import pl\nfrom . import pp\nfrom . import exporting\n\nimport sys\nfrom .. import _utils\n\n_utils.annotate_doc_types(sys.modules[__name__], 'scanpy')\ndel sys, _utils\n\n\n__doc__ = \"\"\"\\\nExternal API\n============\n\n\nImport Scanpy's wrappers to external tools as::\n\n import scanpy.external as sce\n\nIf you'd like to see your tool included here, please open a `pull request <https://github.com/theislab/scanpy>`_!\n\nPreprocessing: PP\n------------------\n\nData integration\n~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n pp.bbknn\n pp.harmony_integrate\n pp.mnn_correct\n pp.scanorama_integrate\n\n\nSample demultiplexing, Doublet detection\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n pp.scrublet\n pp.scrublet_simulate_doublets\n pl.scrublet_score_distribution\n pp.hashsolo\n\nImputation\n~~~~~~~~~~\n\nNote that the fundamental limitations of imputation are still under `debate\n<https://github.com/theislab/scanpy/issues/189>`__.\n\n.. autosummary::\n :toctree: .\n\n pp.dca\n pp.magic\n\n\nTools: TL\n----------\n\nEmbeddings\n~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.phate\n tl.palantir\n tl.trimap\n tl.sam\n\nClustering and trajectory inference\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.phenograph\n tl.harmony_timeseries\n tl.wishbone\n tl.palantir\n tl.palantir_results\n\nGene scores, Cell cycle\n~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.sandbag\n tl.cyclone\n\n\nPlotting: PL\n------------\n\n.. autosummary::\n :toctree: .\n\n pl.phate\n pl.trimap\n pl.sam\n pl.wishbone_marker_trajectory\n\nExporting\n---------\n\n.. autosummary::\n :toctree: .\n\n exporting.spring_project\n exporting.cellbrowser\n\"\"\"\n", "path": "scanpy/external/__init__.py"}], "after_files": [{"content": "from . import tl\nfrom . import pl\nfrom . import pp\nfrom . import exporting\n\nimport sys\nfrom .. import _utils\n\n_utils.annotate_doc_types(sys.modules[__name__], 'scanpy')\ndel sys, _utils\n\n\n__doc__ = \"\"\"\\\nExternal API\n============\n\n.. 
note::\n More tools that integrate well with scanpy and anndata can be found on the :doc:`ecosystem page <../ecosystem>`.\n\nImport Scanpy's wrappers to external tools as::\n\n import scanpy.external as sce\n\nIf you'd like to include a tool here, consider making a pull request (:doc:`instructions <../dev/external-tools>`).\nIf the tool already uses `scanpy` or `anndata`, it may fit better in the :doc:`ecosystem page <../ecosystem>`.\n\nPreprocessing: PP\n------------------\n\nData integration\n~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n pp.bbknn\n pp.harmony_integrate\n pp.mnn_correct\n pp.scanorama_integrate\n\n\nSample demultiplexing, Doublet detection\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n pp.scrublet\n pp.scrublet_simulate_doublets\n pl.scrublet_score_distribution\n pp.hashsolo\n\nImputation\n~~~~~~~~~~\n\nNote that the fundamental limitations of imputation are still under `debate\n<https://github.com/theislab/scanpy/issues/189>`__.\n\n.. autosummary::\n :toctree: .\n\n pp.dca\n pp.magic\n\n\nTools: TL\n----------\n\nEmbeddings\n~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.phate\n tl.palantir\n tl.trimap\n tl.sam\n\nClustering and trajectory inference\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.phenograph\n tl.harmony_timeseries\n tl.wishbone\n tl.palantir\n tl.palantir_results\n\nGene scores, Cell cycle\n~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.sandbag\n tl.cyclone\n\n\nPlotting: PL\n------------\n\n.. autosummary::\n :toctree: .\n\n pl.phate\n pl.trimap\n pl.sam\n pl.wishbone_marker_trajectory\n\nExporting\n---------\n\n.. autosummary::\n :toctree: .\n\n exporting.spring_project\n exporting.cellbrowser\n\"\"\"\n", "path": "scanpy/external/__init__.py"}]}
1,087
227
gh_patches_debug_18187
rasdani/github-patches
git_diff
praw-dev__praw-782
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Explain "PRAW is not thread safe" in the documentation.
It would be great to provide an example of why it is not thread safe.

Relevant comment:

https://www.reddit.com/r/redditdev/comments/63ugl5/praw_mulitprocessing_handler_prawhandler_is_not_a/dfx9oet/?context=3
--- END ISSUE ---

Below are some code segments, each from a relevant file. One or more of these files may contain bugs.

--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """praw setup.py"""
2 
3 import re
4 from codecs import open
5 from os import path
6 from setuptools import find_packages, setup
7 
8 
9 PACKAGE_NAME = 'praw'
10 HERE = path.abspath(path.dirname(__file__))
11 with open(path.join(HERE, 'README.rst'), encoding='utf-8') as fp:
12 README = fp.read()
13 with open(path.join(HERE, PACKAGE_NAME, 'const.py'),
14 encoding='utf-8') as fp:
15 VERSION = re.search("__version__ = '([^']+)'", fp.read()).group(1)
16 
17 
18 setup(name=PACKAGE_NAME,
19 author='Bryce Boe',
20 author_email='bbzbryce@gmail.com',
21 classifiers=[
22 'Development Status :: 5 - Production/Stable',
23 'Environment :: Console',
24 'Intended Audience :: Developers',
25 'License :: OSI Approved :: BSD License',
26 'Natural Language :: English',
27 'Operating System :: OS Independent',
28 'Programming Language :: Python',
29 'Programming Language :: Python :: 2.7',
30 'Programming Language :: Python :: 3',
31 'Programming Language :: Python :: 3.3',
32 'Programming Language :: Python :: 3.4',
33 'Programming Language :: Python :: 3.5',
34 'Programming Language :: Python :: 3.6',
35 'Programming Language :: Python :: Implementation :: CPython',
36 'Topic :: Utilities'],
37 description=('PRAW, an acronym for `Python Reddit API Wrapper`, is a '
38 'python package that allows for simple access to '
39 'reddit\'s API.'),
40 install_requires=['prawcore >=0.9.0, <0.10',
41 'update_checker >=0.16'],
42 keywords='reddit api wrapper',
43 license='Simplified BSD License',
44 long_description=README,
45 package_data={'': ['LICENSE.txt'], PACKAGE_NAME: ['*.ini']},
46 packages=find_packages(exclude=['tests', 'tests.*']),
47 setup_requires=['pytest-runner >=2.1'],
48 tests_require=['betamax >=0.8, <0.9',
49 'betamax-matchers >=0.3.0, <0.4',
50 'betamax-serializers >=0.2, <0.3',
51 'mock >=0.8',
52 'pytest >=2.7.3',
53 'six >=1.10'],
54 test_suite='tests',
55 url='https://praw.readthedocs.org/',
56 version=VERSION)
57 ```
Path: `docs/conf.py`
Content:
```
1 import os
2 import sys
3 sys.path.insert(0, '..')
4 
5 from praw import __version__
6 
7 copyright = '2016, Bryce Boe'
8 exclude_patterns = ['_build']
9 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
10 html_static_path = ['_static']
11 html_theme = 'sphinx_rtd_theme'
12 html_theme_options = {
13 'collapse_navigation': True
14 }
15 html_use_smartypants = True
16 htmlhelp_basename = 'PRAW'
17 intersphinx_mapping = {'python': ('https://docs.python.org/3.6', None)}
18 master_doc = 'index'
19 nitpicky = True
20 project = 'PRAW'
21 pygments_style = 'sphinx'
22 release = __version__
23 source_suffix = '.rst'
24 suppress_warnings = ['image.nonlocal_uri']
25 version = '.'.join(__version__.split('.', 2)[:2])
26 
27 
28 # Use RTD theme locally
29 if not os.environ.get('READTHEDOCS'):
30 import sphinx_rtd_theme
31 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
32 
33 
34 def skip(app, what, name, obj, skip, options):
35 if name in {'__call__', '__contains__', '__getitem__', '__init__',
36 '__iter__', '__len__'}:
37 return False
38 return skip
39 
40 
41 def setup(app):
42 app.connect('autodoc-skip-member', skip)
43 ```
--- END FILES ---

Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example:

```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
 if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
 if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
 server.shutdown()
```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -4,7 +4,7 @@ from praw import __version__ -copyright = '2016, Bryce Boe' +copyright = '2017, Bryce Boe' exclude_patterns = ['_build'] extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx'] html_static_path = ['_static'] diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -37,7 +37,7 @@ description=('PRAW, an acronym for `Python Reddit API Wrapper`, is a ' 'python package that allows for simple access to ' 'reddit\'s API.'), - install_requires=['prawcore >=0.9.0, <0.10', + install_requires=['prawcore >=0.10.1, <0.11', 'update_checker >=0.16'], keywords='reddit api wrapper', license='Simplified BSD License',
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -4,7 +4,7 @@\n \n from praw import __version__\n \n-copyright = '2016, Bryce Boe'\n+copyright = '2017, Bryce Boe'\n exclude_patterns = ['_build']\n extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']\n html_static_path = ['_static']\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,7 +37,7 @@\n description=('PRAW, an acronym for `Python Reddit API Wrapper`, is a '\n 'python package that allows for simple access to '\n 'reddit\\'s API.'),\n- install_requires=['prawcore >=0.9.0, <0.10',\n+ install_requires=['prawcore >=0.10.1, <0.11',\n 'update_checker >=0.16'],\n keywords='reddit api wrapper',\n license='Simplified BSD License',\n", "issue": "Explain \"PRAW is not thread safe\" in the documentation.\nIt would be great to provide an example of why it is not thread safe.\r\n\r\nRelevant comment:\r\n\r\nhttps://www.reddit.com/r/redditdev/comments/63ugl5/praw_mulitprocessing_handler_prawhandler_is_not_a/dfx9oet/?context=3\n", "before_files": [{"content": "\"\"\"praw setup.py\"\"\"\n\nimport re\nfrom codecs import open\nfrom os import path\nfrom setuptools import find_packages, setup\n\n\nPACKAGE_NAME = 'praw'\nHERE = path.abspath(path.dirname(__file__))\nwith open(path.join(HERE, 'README.rst'), encoding='utf-8') as fp:\n README = fp.read()\nwith open(path.join(HERE, PACKAGE_NAME, 'const.py'),\n encoding='utf-8') as fp:\n VERSION = re.search(\"__version__ = '([^']+)'\", fp.read()).group(1)\n\n\nsetup(name=PACKAGE_NAME,\n author='Bryce Boe',\n author_email='bbzbryce@gmail.com',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Utilities'],\n description=('PRAW, an acronym for `Python Reddit API Wrapper`, is a '\n 'python package that allows for simple access to '\n 'reddit\\'s API.'),\n install_requires=['prawcore >=0.9.0, <0.10',\n 'update_checker >=0.16'],\n keywords='reddit api wrapper',\n license='Simplified BSD License',\n long_description=README,\n package_data={'': ['LICENSE.txt'], PACKAGE_NAME: ['*.ini']},\n packages=find_packages(exclude=['tests', 'tests.*']),\n setup_requires=['pytest-runner >=2.1'],\n tests_require=['betamax >=0.8, <0.9',\n 'betamax-matchers >=0.3.0, <0.4',\n 'betamax-serializers >=0.2, <0.3',\n 'mock >=0.8',\n 'pytest >=2.7.3',\n 'six >=1.10'],\n test_suite='tests',\n url='https://praw.readthedocs.org/',\n version=VERSION)\n", "path": "setup.py"}, {"content": "import os\nimport sys\nsys.path.insert(0, '..')\n\nfrom praw import __version__\n\ncopyright = '2016, Bryce Boe'\nexclude_patterns = ['_build']\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']\nhtml_static_path = ['_static']\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_options = {\n 'collapse_navigation': True\n}\nhtml_use_smartypants = True\nhtmlhelp_basename = 'PRAW'\nintersphinx_mapping = {'python': ('https://docs.python.org/3.6', None)}\nmaster_doc = 'index'\nnitpicky = True\nproject = 
'PRAW'\npygments_style = 'sphinx'\nrelease = __version__\nsource_suffix = '.rst'\nsuppress_warnings = ['image.nonlocal_uri']\nversion = '.'.join(__version__.split('.', 2)[:2])\n\n\n# Use RTD theme locally\nif not os.environ.get('READTHEDOCS'):\n import sphinx_rtd_theme\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\ndef skip(app, what, name, obj, skip, options):\n if name in {'__call__', '__contains__', '__getitem__', '__init__',\n '__iter__', '__len__'}:\n return False\n return skip\n\n\ndef setup(app):\n app.connect('autodoc-skip-member', skip)\n", "path": "docs/conf.py"}], "after_files": [{"content": "\"\"\"praw setup.py\"\"\"\n\nimport re\nfrom codecs import open\nfrom os import path\nfrom setuptools import find_packages, setup\n\n\nPACKAGE_NAME = 'praw'\nHERE = path.abspath(path.dirname(__file__))\nwith open(path.join(HERE, 'README.rst'), encoding='utf-8') as fp:\n README = fp.read()\nwith open(path.join(HERE, PACKAGE_NAME, 'const.py'),\n encoding='utf-8') as fp:\n VERSION = re.search(\"__version__ = '([^']+)'\", fp.read()).group(1)\n\n\nsetup(name=PACKAGE_NAME,\n author='Bryce Boe',\n author_email='bbzbryce@gmail.com',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Utilities'],\n description=('PRAW, an acronym for `Python Reddit API Wrapper`, is a '\n 'python package that allows for simple access to '\n 'reddit\\'s API.'),\n install_requires=['prawcore >=0.10.1, <0.11',\n 'update_checker >=0.16'],\n keywords='reddit api wrapper',\n license='Simplified BSD License',\n long_description=README,\n package_data={'': ['LICENSE.txt'], PACKAGE_NAME: ['*.ini']},\n packages=find_packages(exclude=['tests', 'tests.*']),\n setup_requires=['pytest-runner >=2.1'],\n tests_require=['betamax >=0.8, <0.9',\n 'betamax-matchers >=0.3.0, <0.4',\n 'betamax-serializers >=0.2, <0.3',\n 'mock >=0.8',\n 'pytest >=2.7.3',\n 'six >=1.10'],\n test_suite='tests',\n url='https://praw.readthedocs.org/',\n version=VERSION)\n", "path": "setup.py"}, {"content": "import os\nimport sys\nsys.path.insert(0, '..')\n\nfrom praw import __version__\n\ncopyright = '2017, Bryce Boe'\nexclude_patterns = ['_build']\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']\nhtml_static_path = ['_static']\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_options = {\n 'collapse_navigation': True\n}\nhtml_use_smartypants = True\nhtmlhelp_basename = 'PRAW'\nintersphinx_mapping = {'python': ('https://docs.python.org/3.6', None)}\nmaster_doc = 'index'\nnitpicky = True\nproject = 'PRAW'\npygments_style = 'sphinx'\nrelease = __version__\nsource_suffix = '.rst'\nsuppress_warnings = ['image.nonlocal_uri']\nversion = '.'.join(__version__.split('.', 2)[:2])\n\n\n# Use RTD theme locally\nif not os.environ.get('READTHEDOCS'):\n import sphinx_rtd_theme\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\ndef skip(app, what, name, obj, skip, options):\n if name in {'__call__', '__contains__', '__getitem__', '__init__',\n '__iter__', '__len__'}:\n return 
False\n return skip\n\n\ndef setup(app):\n app.connect('autodoc-skip-member', skip)\n", "path": "docs/conf.py"}]}
1,350
237
gh_patches_debug_24913
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-6564
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- bupa_gb spider is including closed branches The bupa_gb.py spider is returning a number of closed practices, as these still have web pages and are still listed in the sitemap. Current examples include: https://www.bupa.co.uk/dental/dental-care/practices/harleston https://www.bupa.co.uk/dental/dental-care/practices/leckhampton I think these can be reliably detected by checking if the name ends (case-insensitively) with "closed". So I'd suggest we drop any whose name matches /closed$/i. There are about 12 of these in total out of 388 branches. (Sorry, I can no longer run the code on my computer to implement and test this myself, since I'm unable to install a recent enough version of Python.) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/items.py` Content: ``` 1 # Define here the models for your scraped items 2 # 3 # See documentation in: 4 # http://doc.scrapy.org/en/latest/topics/items.html 5 6 import scrapy 7 8 9 class Feature(scrapy.Item): 10 lat = scrapy.Field() 11 lon = scrapy.Field() 12 geometry = scrapy.Field() 13 name = scrapy.Field() 14 branch = scrapy.Field() 15 addr_full = scrapy.Field() 16 housenumber = scrapy.Field() 17 street = scrapy.Field() 18 street_address = scrapy.Field() 19 city = scrapy.Field() 20 state = scrapy.Field() 21 postcode = scrapy.Field() 22 country = scrapy.Field() 23 phone = scrapy.Field() 24 email = scrapy.Field() 25 website = scrapy.Field() 26 twitter = scrapy.Field() 27 facebook = scrapy.Field() 28 opening_hours = scrapy.Field() 29 image = scrapy.Field() 30 ref = scrapy.Field() 31 brand = scrapy.Field() 32 brand_wikidata = scrapy.Field() 33 operator = scrapy.Field() 34 operator_wikidata = scrapy.Field() 35 located_in = scrapy.Field() 36 located_in_wikidata = scrapy.Field() 37 nsi_id = scrapy.Field() 38 extras = scrapy.Field() 39 40 def __init__(self, *args, **kwargs): 41 super().__init__(*args, **kwargs) 42 if not self._values.get("extras"): 43 self.__setitem__("extras", {}) 44 45 46 def get_lat_lon(item: Feature) -> (float, float): 47 if geometry := item.get("geometry"): 48 if isinstance(geometry, dict): 49 if geometry.get("type") == "Point": 50 if coords := geometry.get("coordinates"): 51 try: 52 return float(coords[1]), float(coords[0]) 53 except (TypeError, ValueError): 54 item["geometry"] = None 55 else: 56 try: 57 return float(item.get("lat")), float(item.get("lon")) 58 except (TypeError, ValueError): 59 pass 60 return None 61 62 63 def set_lat_lon(item: Feature, lat: float, lon: float): 64 item.pop("lat", None) 65 item.pop("lon", None) 66 if lat and lon: 67 item["geometry"] = { 68 "type": "Point", 69 "coordinates": [lon, lat], 70 } 71 else: 72 item["geometry"] = None 73 74 75 def add_social_media(item: Feature, service: str, account: str): 76 service = service.lower() 77 if service in item.fields: 78 item[service] = account 79 else: 80 item["extras"][f"contact:{service}"] = account 81 ``` Path: `locations/spiders/bupa_gb.py` Content: ``` 1 from scrapy.spiders import SitemapSpider 2 3 from locations.categories import Categories 4 from locations.structured_data_spider import StructuredDataSpider 5 6 7 class BupaGBSpider(SitemapSpider, StructuredDataSpider): 8 name = "bupa_gb" 9 item_attributes = {"brand": "Bupa", "brand_wikidata": "Q931628", "extras": Categories.DENTIST.value} 10 sitemap_urls = ["https://www.bupa.co.uk/robots.txt"] 
11 sitemap_rules = [(r"/practices/([-\w]+)$", "parse_sd")] 12 13 def post_process_item(self, item, response, ld_data, **kwargs): 14 if "Total Dental Care" in item["name"]: 15 item["brand"] = "Total Dental Care" 16 yield item 17 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/items.py b/locations/items.py --- a/locations/items.py +++ b/locations/items.py @@ -2,6 +2,7 @@ # # See documentation in: # http://doc.scrapy.org/en/latest/topics/items.html +from datetime import datetime import scrapy @@ -78,3 +79,7 @@ item[service] = account else: item["extras"][f"contact:{service}"] = account + + +def set_closed(item: Feature, end_date: datetime = None): + item["extras"]["end_date"] = end_date.strftime("%Y-%m-%d") if end_date else "yes" diff --git a/locations/spiders/bupa_gb.py b/locations/spiders/bupa_gb.py --- a/locations/spiders/bupa_gb.py +++ b/locations/spiders/bupa_gb.py @@ -1,6 +1,7 @@ from scrapy.spiders import SitemapSpider from locations.categories import Categories +from locations.items import set_closed from locations.structured_data_spider import StructuredDataSpider @@ -13,4 +14,8 @@ def post_process_item(self, item, response, ld_data, **kwargs): if "Total Dental Care" in item["name"]: item["brand"] = "Total Dental Care" + + if item["name"].lower().endswith(" - closed"): + set_closed(item) + yield item
{"golden_diff": "diff --git a/locations/items.py b/locations/items.py\n--- a/locations/items.py\n+++ b/locations/items.py\n@@ -2,6 +2,7 @@\n #\n # See documentation in:\n # http://doc.scrapy.org/en/latest/topics/items.html\n+from datetime import datetime\n \n import scrapy\n \n@@ -78,3 +79,7 @@\n item[service] = account\n else:\n item[\"extras\"][f\"contact:{service}\"] = account\n+\n+\n+def set_closed(item: Feature, end_date: datetime = None):\n+ item[\"extras\"][\"end_date\"] = end_date.strftime(\"%Y-%m-%d\") if end_date else \"yes\"\ndiff --git a/locations/spiders/bupa_gb.py b/locations/spiders/bupa_gb.py\n--- a/locations/spiders/bupa_gb.py\n+++ b/locations/spiders/bupa_gb.py\n@@ -1,6 +1,7 @@\n from scrapy.spiders import SitemapSpider\n \n from locations.categories import Categories\n+from locations.items import set_closed\n from locations.structured_data_spider import StructuredDataSpider\n \n \n@@ -13,4 +14,8 @@\n def post_process_item(self, item, response, ld_data, **kwargs):\n if \"Total Dental Care\" in item[\"name\"]:\n item[\"brand\"] = \"Total Dental Care\"\n+\n+ if item[\"name\"].lower().endswith(\" - closed\"):\n+ set_closed(item)\n+\n yield item\n", "issue": "bupa_gb spider is including closed branches\nThe bupa_gb.py spider is returning a number of closed practices, as these still have web pages and are still listed in the sitemap. Current examples include:\r\n\r\nhttps://www.bupa.co.uk/dental/dental-care/practices/harleston\r\nhttps://www.bupa.co.uk/dental/dental-care/practices/leckhampton\r\n\r\nI think these can be reliably detected by checking if the name ends (case-insensitively) with \"closed\". So I'd suggest we drop any whose name matches /closed$/i. There are about 12 of these in total out of 388 branches.\r\n\r\n(Sorry, I can no longer run the code on my computer to implement and test this myself, since I'm unable to install a recent enough version of Python.)\n", "before_files": [{"content": "# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\n\nclass Feature(scrapy.Item):\n lat = scrapy.Field()\n lon = scrapy.Field()\n geometry = scrapy.Field()\n name = scrapy.Field()\n branch = scrapy.Field()\n addr_full = scrapy.Field()\n housenumber = scrapy.Field()\n street = scrapy.Field()\n street_address = scrapy.Field()\n city = scrapy.Field()\n state = scrapy.Field()\n postcode = scrapy.Field()\n country = scrapy.Field()\n phone = scrapy.Field()\n email = scrapy.Field()\n website = scrapy.Field()\n twitter = scrapy.Field()\n facebook = scrapy.Field()\n opening_hours = scrapy.Field()\n image = scrapy.Field()\n ref = scrapy.Field()\n brand = scrapy.Field()\n brand_wikidata = scrapy.Field()\n operator = scrapy.Field()\n operator_wikidata = scrapy.Field()\n located_in = scrapy.Field()\n located_in_wikidata = scrapy.Field()\n nsi_id = scrapy.Field()\n extras = scrapy.Field()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not self._values.get(\"extras\"):\n self.__setitem__(\"extras\", {})\n\n\ndef get_lat_lon(item: Feature) -> (float, float):\n if geometry := item.get(\"geometry\"):\n if isinstance(geometry, dict):\n if geometry.get(\"type\") == \"Point\":\n if coords := geometry.get(\"coordinates\"):\n try:\n return float(coords[1]), float(coords[0])\n except (TypeError, ValueError):\n item[\"geometry\"] = None\n else:\n try:\n return float(item.get(\"lat\")), float(item.get(\"lon\"))\n except (TypeError, ValueError):\n pass\n return 
None\n\n\ndef set_lat_lon(item: Feature, lat: float, lon: float):\n item.pop(\"lat\", None)\n item.pop(\"lon\", None)\n if lat and lon:\n item[\"geometry\"] = {\n \"type\": \"Point\",\n \"coordinates\": [lon, lat],\n }\n else:\n item[\"geometry\"] = None\n\n\ndef add_social_media(item: Feature, service: str, account: str):\n service = service.lower()\n if service in item.fields:\n item[service] = account\n else:\n item[\"extras\"][f\"contact:{service}\"] = account\n", "path": "locations/items.py"}, {"content": "from scrapy.spiders import SitemapSpider\n\nfrom locations.categories import Categories\nfrom locations.structured_data_spider import StructuredDataSpider\n\n\nclass BupaGBSpider(SitemapSpider, StructuredDataSpider):\n name = \"bupa_gb\"\n item_attributes = {\"brand\": \"Bupa\", \"brand_wikidata\": \"Q931628\", \"extras\": Categories.DENTIST.value}\n sitemap_urls = [\"https://www.bupa.co.uk/robots.txt\"]\n sitemap_rules = [(r\"/practices/([-\\w]+)$\", \"parse_sd\")]\n\n def post_process_item(self, item, response, ld_data, **kwargs):\n if \"Total Dental Care\" in item[\"name\"]:\n item[\"brand\"] = \"Total Dental Care\"\n yield item\n", "path": "locations/spiders/bupa_gb.py"}], "after_files": [{"content": "# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\nfrom datetime import datetime\n\nimport scrapy\n\n\nclass Feature(scrapy.Item):\n lat = scrapy.Field()\n lon = scrapy.Field()\n geometry = scrapy.Field()\n name = scrapy.Field()\n branch = scrapy.Field()\n addr_full = scrapy.Field()\n housenumber = scrapy.Field()\n street = scrapy.Field()\n street_address = scrapy.Field()\n city = scrapy.Field()\n state = scrapy.Field()\n postcode = scrapy.Field()\n country = scrapy.Field()\n phone = scrapy.Field()\n email = scrapy.Field()\n website = scrapy.Field()\n twitter = scrapy.Field()\n facebook = scrapy.Field()\n opening_hours = scrapy.Field()\n image = scrapy.Field()\n ref = scrapy.Field()\n brand = scrapy.Field()\n brand_wikidata = scrapy.Field()\n operator = scrapy.Field()\n operator_wikidata = scrapy.Field()\n located_in = scrapy.Field()\n located_in_wikidata = scrapy.Field()\n nsi_id = scrapy.Field()\n extras = scrapy.Field()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not self._values.get(\"extras\"):\n self.__setitem__(\"extras\", {})\n\n\ndef get_lat_lon(item: Feature) -> (float, float):\n if geometry := item.get(\"geometry\"):\n if isinstance(geometry, dict):\n if geometry.get(\"type\") == \"Point\":\n if coords := geometry.get(\"coordinates\"):\n try:\n return float(coords[1]), float(coords[0])\n except (TypeError, ValueError):\n item[\"geometry\"] = None\n else:\n try:\n return float(item.get(\"lat\")), float(item.get(\"lon\"))\n except (TypeError, ValueError):\n pass\n return None\n\n\ndef set_lat_lon(item: Feature, lat: float, lon: float):\n item.pop(\"lat\", None)\n item.pop(\"lon\", None)\n if lat and lon:\n item[\"geometry\"] = {\n \"type\": \"Point\",\n \"coordinates\": [lon, lat],\n }\n else:\n item[\"geometry\"] = None\n\n\ndef add_social_media(item: Feature, service: str, account: str):\n service = service.lower()\n if service in item.fields:\n item[service] = account\n else:\n item[\"extras\"][f\"contact:{service}\"] = account\n\n\ndef set_closed(item: Feature, end_date: datetime = None):\n item[\"extras\"][\"end_date\"] = end_date.strftime(\"%Y-%m-%d\") if end_date else \"yes\"\n", "path": "locations/items.py"}, {"content": "from scrapy.spiders import 
SitemapSpider\n\nfrom locations.categories import Categories\nfrom locations.items import set_closed\nfrom locations.structured_data_spider import StructuredDataSpider\n\n\nclass BupaGBSpider(SitemapSpider, StructuredDataSpider):\n name = \"bupa_gb\"\n item_attributes = {\"brand\": \"Bupa\", \"brand_wikidata\": \"Q931628\", \"extras\": Categories.DENTIST.value}\n sitemap_urls = [\"https://www.bupa.co.uk/robots.txt\"]\n sitemap_rules = [(r\"/practices/([-\\w]+)$\", \"parse_sd\")]\n\n def post_process_item(self, item, response, ld_data, **kwargs):\n if \"Total Dental Care\" in item[\"name\"]:\n item[\"brand\"] = \"Total Dental Care\"\n\n if item[\"name\"].lower().endswith(\" - closed\"):\n set_closed(item)\n\n yield item\n", "path": "locations/spiders/bupa_gb.py"}]}
1,318
318
gh_patches_debug_32
rasdani/github-patches
git_diff
mlflow__mlflow-2797
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [SETUP-BUG] ResolvePackageNotFound - python=3.5.2 Thank you for submitting an issue. Please refer to our [issue policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md) for information on what types of issues we address. Please fill in this template and do not delete it unless you are sure your issue is outside its scope. ### System information - **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 16.04 - **MLflow installed from (source or binary)**: binary (pip install mlflow) - **MLflow version (run ``mlflow --version``)**: 1.2.0 - **Python version**: 3.5.2 - **Exact command to reproduce**: mlflow models build-docker -m /path/to/model -n "my-model" ### Describe the problem mlflow models build-docker -m /path/to/model -n "my-model" ### Other info / logs Warning: you have pip-installed dependencies in your environment file, but you do not list pip itself as one of your condadependencies. Conda may not use the correct pip to install your packages, and they may end up in the wrong place. Pleaseadd an explicit pip dependency. I'm adding one for you, but still nagging you. Collecting package metadata (repodata.json): ...working... done Solving environment: ...working... failed ResolvePackageNotFound: - python=3.5.2 Traceback (most recent call last): File "<string>", line 1, in <module> File "/miniconda/lib/python3.7/site-packages/mlflow/models/container/__init__.py", line 102, in _install_pyfunc_deps raise Exception("Failed to create model environment.") Exception: Failed to create model environment. creating and activating custom environment The command '/bin/sh -c python -c 'from mlflow.models.container import _install_pyfunc_deps; _install_pyfunc_deps("/opt/ml/model", install_mlflow=False)'' returned a non-zero code: 1 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mlflow/utils/environment.py` Content: ``` 1 import yaml 2 3 from mlflow.utils import PYTHON_VERSION 4 5 _conda_header = """\ 6 name: mlflow-env 7 channels: 8 - defaults 9 """ 10 11 12 def _mlflow_conda_env(path=None, additional_conda_deps=None, additional_pip_deps=None, 13 additional_conda_channels=None, install_mlflow=True): 14 """ 15 Creates a Conda environment with the specified package channels and dependencies. If there are 16 any pip dependencies, including from the install_mlflow parameter, then pip will be added to 17 the conda dependencies. This is done to ensure that the pip inside the conda environment is 18 used to install the pip dependencies. 19 20 :param path: Local filesystem path where the conda env file is to be written. If unspecified, 21 the conda env will not be written to the filesystem; it will still be returned 22 in dictionary format. 23 :param additional_conda_deps: List of additional conda dependencies passed as strings. 24 :param additional_pip_deps: List of additional pip dependencies passed as strings. 25 :param additional_conda_channels: List of additional conda channels to search when resolving 26 packages. 27 :return: ``None`` if ``path`` is specified. Otherwise, the a dictionary representation of the 28 Conda environment. 
29 """ 30 pip_deps = (["mlflow"] if install_mlflow else []) + ( 31 additional_pip_deps if additional_pip_deps else []) 32 conda_deps = (additional_conda_deps if additional_conda_deps else []) + ( 33 ["pip"] if pip_deps else []) 34 35 env = yaml.safe_load(_conda_header) 36 env["dependencies"] = ["python={}".format(PYTHON_VERSION)] 37 if conda_deps is not None: 38 env["dependencies"] += conda_deps 39 env["dependencies"].append({"pip": pip_deps}) 40 if additional_conda_channels is not None: 41 env["channels"] += additional_conda_channels 42 43 if path is not None: 44 with open(path, "w") as out: 45 yaml.safe_dump(env, stream=out, default_flow_style=False) 46 return None 47 else: 48 return env 49 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mlflow/utils/environment.py b/mlflow/utils/environment.py --- a/mlflow/utils/environment.py +++ b/mlflow/utils/environment.py @@ -6,6 +6,7 @@ name: mlflow-env channels: - defaults + - conda-forge """
{"golden_diff": "diff --git a/mlflow/utils/environment.py b/mlflow/utils/environment.py\n--- a/mlflow/utils/environment.py\n+++ b/mlflow/utils/environment.py\n@@ -6,6 +6,7 @@\n name: mlflow-env\n channels:\n - defaults\n+ - conda-forge\n \"\"\"\n", "issue": "[SETUP-BUG] ResolvePackageNotFound - python=3.5.2\nThank you for submitting an issue. Please refer to our [issue policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md)\r\nfor information on what types of issues we address.\r\n \r\nPlease fill in this template and do not delete it unless you are sure your issue is outside its scope.\r\n\r\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 16.04\r\n- **MLflow installed from (source or binary)**: binary (pip install mlflow)\r\n- **MLflow version (run ``mlflow --version``)**: 1.2.0\r\n- **Python version**: 3.5.2\r\n- **Exact command to reproduce**: mlflow models build-docker -m /path/to/model -n \"my-model\" \r\n\r\n### Describe the problem\r\nmlflow models build-docker -m /path/to/model -n \"my-model\"\r\n\r\n### Other info / logs\r\nWarning: you have pip-installed dependencies in your environment file, but you do not list pip itself as one of your condadependencies. Conda may not use the correct pip to install your packages, and they may end up in the wrong place. Pleaseadd an explicit pip dependency. I'm adding one for you, but still nagging you.\r\nCollecting package metadata (repodata.json): ...working... done\r\nSolving environment: ...working... failed\r\n\r\nResolvePackageNotFound:\r\n - python=3.5.2\r\n\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/miniconda/lib/python3.7/site-packages/mlflow/models/container/__init__.py\", line 102, in _install_pyfunc_deps\r\n raise Exception(\"Failed to create model environment.\")\r\nException: Failed to create model environment.\r\ncreating and activating custom environment\r\nThe command '/bin/sh -c python -c 'from mlflow.models.container import _install_pyfunc_deps; _install_pyfunc_deps(\"/opt/ml/model\", install_mlflow=False)'' returned a non-zero code: 1\r\n\n", "before_files": [{"content": "import yaml\n\nfrom mlflow.utils import PYTHON_VERSION\n\n_conda_header = \"\"\"\\\nname: mlflow-env\nchannels:\n - defaults\n\"\"\"\n\n\ndef _mlflow_conda_env(path=None, additional_conda_deps=None, additional_pip_deps=None,\n additional_conda_channels=None, install_mlflow=True):\n \"\"\"\n Creates a Conda environment with the specified package channels and dependencies. If there are\n any pip dependencies, including from the install_mlflow parameter, then pip will be added to\n the conda dependencies. This is done to ensure that the pip inside the conda environment is\n used to install the pip dependencies.\n\n :param path: Local filesystem path where the conda env file is to be written. If unspecified,\n the conda env will not be written to the filesystem; it will still be returned\n in dictionary format.\n :param additional_conda_deps: List of additional conda dependencies passed as strings.\n :param additional_pip_deps: List of additional pip dependencies passed as strings.\n :param additional_conda_channels: List of additional conda channels to search when resolving\n packages.\n :return: ``None`` if ``path`` is specified. 
Otherwise, the a dictionary representation of the\n Conda environment.\n \"\"\"\n pip_deps = ([\"mlflow\"] if install_mlflow else []) + (\n additional_pip_deps if additional_pip_deps else [])\n conda_deps = (additional_conda_deps if additional_conda_deps else []) + (\n [\"pip\"] if pip_deps else [])\n\n env = yaml.safe_load(_conda_header)\n env[\"dependencies\"] = [\"python={}\".format(PYTHON_VERSION)]\n if conda_deps is not None:\n env[\"dependencies\"] += conda_deps\n env[\"dependencies\"].append({\"pip\": pip_deps})\n if additional_conda_channels is not None:\n env[\"channels\"] += additional_conda_channels\n\n if path is not None:\n with open(path, \"w\") as out:\n yaml.safe_dump(env, stream=out, default_flow_style=False)\n return None\n else:\n return env\n", "path": "mlflow/utils/environment.py"}], "after_files": [{"content": "import yaml\n\nfrom mlflow.utils import PYTHON_VERSION\n\n_conda_header = \"\"\"\\\nname: mlflow-env\nchannels:\n - defaults\n - conda-forge\n\"\"\"\n\n\ndef _mlflow_conda_env(path=None, additional_conda_deps=None, additional_pip_deps=None,\n additional_conda_channels=None, install_mlflow=True):\n \"\"\"\n Creates a Conda environment with the specified package channels and dependencies. If there are\n any pip dependencies, including from the install_mlflow parameter, then pip will be added to\n the conda dependencies. This is done to ensure that the pip inside the conda environment is\n used to install the pip dependencies.\n\n :param path: Local filesystem path where the conda env file is to be written. If unspecified,\n the conda env will not be written to the filesystem; it will still be returned\n in dictionary format.\n :param additional_conda_deps: List of additional conda dependencies passed as strings.\n :param additional_pip_deps: List of additional pip dependencies passed as strings.\n :param additional_conda_channels: List of additional conda channels to search when resolving\n packages.\n :return: ``None`` if ``path`` is specified. Otherwise, the a dictionary representation of the\n Conda environment.\n \"\"\"\n pip_deps = ([\"mlflow\"] if install_mlflow else []) + (\n additional_pip_deps if additional_pip_deps else [])\n conda_deps = (additional_conda_deps if additional_conda_deps else []) + (\n [\"pip\"] if pip_deps else [])\n\n env = yaml.safe_load(_conda_header)\n env[\"dependencies\"] = [\"python={}\".format(PYTHON_VERSION)]\n if conda_deps is not None:\n env[\"dependencies\"] += conda_deps\n env[\"dependencies\"].append({\"pip\": pip_deps})\n if additional_conda_channels is not None:\n env[\"channels\"] += additional_conda_channels\n\n if path is not None:\n with open(path, \"w\") as out:\n yaml.safe_dump(env, stream=out, default_flow_style=False)\n return None\n else:\n return env\n", "path": "mlflow/utils/environment.py"}]}
1,264
63
gh_patches_debug_37559
rasdani/github-patches
git_diff
yt-dlp__yt-dlp-1202
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Site Request] 7plus add login/pass to access 720p resolution <!-- ###################################################################### WARNING! IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE ###################################################################### --> ## Checklist <!-- Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp: - First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.09.25. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED. - Search the bugtracker for similar site feature requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates. - Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space) --> - [x] I'm reporting a site feature request - [x] I've verified that I'm running yt-dlp version **2021.09.25** - [x] I've searched the bugtracker for similar site feature requests including closed ones ## Description <!-- Provide an explanation of your site feature request in an arbitrary form. Please make sure the description is worded well enough to be understood, see https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient. Provide any additional information, suggested solution and as much context and examples as possible. --> 7Plus has 720p resolution available behind a login/pass. Current site functionality only pulls 540p. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `yt_dlp/extractor/sevenplus.py` Content: ``` 1 # coding: utf-8 2 from __future__ import unicode_literals 3 4 import re 5 6 from .brightcove import BrightcoveNewIE 7 from ..compat import ( 8 compat_HTTPError, 9 compat_str, 10 ) 11 from ..utils import ( 12 ExtractorError, 13 try_get, 14 update_url_query, 15 ) 16 17 18 class SevenPlusIE(BrightcoveNewIE): 19 IE_NAME = '7plus' 20 _VALID_URL = r'https?://(?:www\.)?7plus\.com\.au/(?P<path>[^?]+\?.*?\bepisode-id=(?P<id>[^&#]+))' 21 _TESTS = [{ 22 'url': 'https://7plus.com.au/MTYS?episode-id=MTYS7-003', 23 'info_dict': { 24 'id': 'MTYS7-003', 25 'ext': 'mp4', 26 'title': 'S7 E3 - Wind Surf', 27 'description': 'md5:29c6a69f21accda7601278f81b46483d', 28 'uploader_id': '5303576322001', 29 'upload_date': '20171201', 30 'timestamp': 1512106377, 31 'series': 'Mighty Ships', 32 'season_number': 7, 33 'episode_number': 3, 34 'episode': 'Wind Surf', 35 }, 36 'params': { 37 'format': 'bestvideo', 38 'skip_download': True, 39 } 40 }, { 41 'url': 'https://7plus.com.au/UUUU?episode-id=AUMS43-001', 42 'only_matching': True, 43 }] 44 45 def _real_extract(self, url): 46 path, episode_id = self._match_valid_url(url).groups() 47 48 try: 49 media = self._download_json( 50 'https://videoservice.swm.digital/playback', episode_id, query={ 51 'appId': '7plus', 52 'deviceType': 'web', 53 'platformType': 'web', 54 'accountId': 5303576322001, 55 'referenceId': 'ref:' + episode_id, 56 'deliveryId': 'csai', 57 'videoType': 'vod', 58 })['media'] 59 except ExtractorError as e: 60 if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: 61 raise ExtractorError(self._parse_json( 62 e.cause.read().decode(), episode_id)[0]['error_code'], expected=True) 63 raise 64 65 for source in media.get('sources', {}): 66 src = source.get('src') 67 if not src: 68 continue 69 source['src'] = update_url_query(src, {'rule': ''}) 70 71 info = self._parse_brightcove_metadata(media, episode_id) 72 73 content = self._download_json( 74 'https://component-cdn.swm.digital/content/' + path, 75 episode_id, headers={ 76 'market-id': 4, 77 }, fatal=False) or {} 78 for item in content.get('items', {}): 79 if item.get('componentData', {}).get('componentType') == 'infoPanel': 80 for src_key, dst_key in [('title', 'title'), ('shortSynopsis', 'description')]: 81 value = item.get(src_key) 82 if value: 83 info[dst_key] = value 84 info['series'] = try_get( 85 item, lambda x: x['seriesLogo']['name'], compat_str) 86 mobj = re.search(r'^S(\d+)\s+E(\d+)\s+-\s+(.+)$', info['title']) 87 if mobj: 88 info.update({ 89 'season_number': int(mobj.group(1)), 90 'episode_number': int(mobj.group(2)), 91 'episode': mobj.group(3), 92 }) 93 94 return info 95 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/yt_dlp/extractor/sevenplus.py b/yt_dlp/extractor/sevenplus.py --- a/yt_dlp/extractor/sevenplus.py +++ b/yt_dlp/extractor/sevenplus.py @@ -1,6 +1,7 @@ # coding: utf-8 from __future__ import unicode_literals +import json import re from .brightcove import BrightcoveNewIE @@ -42,9 +43,52 @@ 'only_matching': True, }] + def _real_initialize(self): + self.token = None + + cookies = self._get_cookies('https://7plus.com.au') + api_key = next((x for x in cookies if x.startswith('glt_')), '')[4:] + if not api_key: # Cookies are signed out, skip login + return + + login_resp = self._download_json( + 'https://login.7plus.com.au/accounts.getJWT', None, 'Logging in', fatal=False, + query={ + 'APIKey': api_key, + 'sdk': 'js_latest', + 'login_token': cookies[f'glt_{api_key}'].value, + 'authMode': 'cookie', + 'pageURL': 'https://7plus.com.au/', + 'sdkBuild': '12471', + 'format': 'json', + }) or {} + + if 'errorMessage' in login_resp: + self.report_warning(f'Unable to login: 7plus said: {login_resp["errorMessage"]}') + return + id_token = login_resp.get('id_token') + if not id_token: + self.report_warning('Unable to login: Could not extract id token') + return + + token_resp = self._download_json( + 'https://7plus.com.au/auth/token', None, 'Getting auth token', fatal=False, + headers={'Content-Type': 'application/json'}, data=json.dumps({ + 'idToken': id_token, + 'platformId': 'web', + 'regSource': '7plus', + }).encode('utf-8')) or {} + self.token = token_resp.get('token') + if not self.token: + self.report_warning('Unable to log in: Could not extract auth token') + def _real_extract(self, url): path, episode_id = self._match_valid_url(url).groups() + headers = {} + if self.token: + headers['Authorization'] = f'Bearer {self.token}' + try: media = self._download_json( 'https://videoservice.swm.digital/playback', episode_id, query={ @@ -55,7 +99,7 @@ 'referenceId': 'ref:' + episode_id, 'deliveryId': 'csai', 'videoType': 'vod', - })['media'] + }, headers=headers)['media'] except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: raise ExtractorError(self._parse_json(
{"golden_diff": "diff --git a/yt_dlp/extractor/sevenplus.py b/yt_dlp/extractor/sevenplus.py\n--- a/yt_dlp/extractor/sevenplus.py\n+++ b/yt_dlp/extractor/sevenplus.py\n@@ -1,6 +1,7 @@\n # coding: utf-8\n from __future__ import unicode_literals\n \n+import json\n import re\n \n from .brightcove import BrightcoveNewIE\n@@ -42,9 +43,52 @@\n 'only_matching': True,\n }]\n \n+ def _real_initialize(self):\n+ self.token = None\n+\n+ cookies = self._get_cookies('https://7plus.com.au')\n+ api_key = next((x for x in cookies if x.startswith('glt_')), '')[4:]\n+ if not api_key: # Cookies are signed out, skip login\n+ return\n+\n+ login_resp = self._download_json(\n+ 'https://login.7plus.com.au/accounts.getJWT', None, 'Logging in', fatal=False,\n+ query={\n+ 'APIKey': api_key,\n+ 'sdk': 'js_latest',\n+ 'login_token': cookies[f'glt_{api_key}'].value,\n+ 'authMode': 'cookie',\n+ 'pageURL': 'https://7plus.com.au/',\n+ 'sdkBuild': '12471',\n+ 'format': 'json',\n+ }) or {}\n+\n+ if 'errorMessage' in login_resp:\n+ self.report_warning(f'Unable to login: 7plus said: {login_resp[\"errorMessage\"]}')\n+ return\n+ id_token = login_resp.get('id_token')\n+ if not id_token:\n+ self.report_warning('Unable to login: Could not extract id token')\n+ return\n+\n+ token_resp = self._download_json(\n+ 'https://7plus.com.au/auth/token', None, 'Getting auth token', fatal=False,\n+ headers={'Content-Type': 'application/json'}, data=json.dumps({\n+ 'idToken': id_token,\n+ 'platformId': 'web',\n+ 'regSource': '7plus',\n+ }).encode('utf-8')) or {}\n+ self.token = token_resp.get('token')\n+ if not self.token:\n+ self.report_warning('Unable to log in: Could not extract auth token')\n+\n def _real_extract(self, url):\n path, episode_id = self._match_valid_url(url).groups()\n \n+ headers = {}\n+ if self.token:\n+ headers['Authorization'] = f'Bearer {self.token}'\n+\n try:\n media = self._download_json(\n 'https://videoservice.swm.digital/playback', episode_id, query={\n@@ -55,7 +99,7 @@\n 'referenceId': 'ref:' + episode_id,\n 'deliveryId': 'csai',\n 'videoType': 'vod',\n- })['media']\n+ }, headers=headers)['media']\n except ExtractorError as e:\n if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:\n raise ExtractorError(self._parse_json(\n", "issue": "[Site Request] 7plus add login/pass to access 720p resolution\n<!--\r\n\r\n######################################################################\r\n WARNING!\r\n IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE\r\n######################################################################\r\n\r\n-->\r\n\r\n\r\n## Checklist\r\n\r\n<!--\r\nCarefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:\r\n- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.09.25. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.\r\n- Search the bugtracker for similar site feature requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.\r\n- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)\r\n-->\r\n\r\n- [x] I'm reporting a site feature request\r\n- [x] I've verified that I'm running yt-dlp version **2021.09.25**\r\n- [x] I've searched the bugtracker for similar site feature requests including closed ones\r\n\r\n\r\n## Description\r\n\r\n<!--\r\nProvide an explanation of your site feature request in an arbitrary form. 
Please make sure the description is worded well enough to be understood, see https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient. Provide any additional information, suggested solution and as much context and examples as possible.\r\n-->\r\n\r\n7Plus has 720p resolution available behind a login/pass. Current site functionality only pulls 540p. \n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .brightcove import BrightcoveNewIE\nfrom ..compat import (\n compat_HTTPError,\n compat_str,\n)\nfrom ..utils import (\n ExtractorError,\n try_get,\n update_url_query,\n)\n\n\nclass SevenPlusIE(BrightcoveNewIE):\n IE_NAME = '7plus'\n _VALID_URL = r'https?://(?:www\\.)?7plus\\.com\\.au/(?P<path>[^?]+\\?.*?\\bepisode-id=(?P<id>[^&#]+))'\n _TESTS = [{\n 'url': 'https://7plus.com.au/MTYS?episode-id=MTYS7-003',\n 'info_dict': {\n 'id': 'MTYS7-003',\n 'ext': 'mp4',\n 'title': 'S7 E3 - Wind Surf',\n 'description': 'md5:29c6a69f21accda7601278f81b46483d',\n 'uploader_id': '5303576322001',\n 'upload_date': '20171201',\n 'timestamp': 1512106377,\n 'series': 'Mighty Ships',\n 'season_number': 7,\n 'episode_number': 3,\n 'episode': 'Wind Surf',\n },\n 'params': {\n 'format': 'bestvideo',\n 'skip_download': True,\n }\n }, {\n 'url': 'https://7plus.com.au/UUUU?episode-id=AUMS43-001',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n path, episode_id = self._match_valid_url(url).groups()\n\n try:\n media = self._download_json(\n 'https://videoservice.swm.digital/playback', episode_id, query={\n 'appId': '7plus',\n 'deviceType': 'web',\n 'platformType': 'web',\n 'accountId': 5303576322001,\n 'referenceId': 'ref:' + episode_id,\n 'deliveryId': 'csai',\n 'videoType': 'vod',\n })['media']\n except ExtractorError as e:\n if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:\n raise ExtractorError(self._parse_json(\n e.cause.read().decode(), episode_id)[0]['error_code'], expected=True)\n raise\n\n for source in media.get('sources', {}):\n src = source.get('src')\n if not src:\n continue\n source['src'] = update_url_query(src, {'rule': ''})\n\n info = self._parse_brightcove_metadata(media, episode_id)\n\n content = self._download_json(\n 'https://component-cdn.swm.digital/content/' + path,\n episode_id, headers={\n 'market-id': 4,\n }, fatal=False) or {}\n for item in content.get('items', {}):\n if item.get('componentData', {}).get('componentType') == 'infoPanel':\n for src_key, dst_key in [('title', 'title'), ('shortSynopsis', 'description')]:\n value = item.get(src_key)\n if value:\n info[dst_key] = value\n info['series'] = try_get(\n item, lambda x: x['seriesLogo']['name'], compat_str)\n mobj = re.search(r'^S(\\d+)\\s+E(\\d+)\\s+-\\s+(.+)$', info['title'])\n if mobj:\n info.update({\n 'season_number': int(mobj.group(1)),\n 'episode_number': int(mobj.group(2)),\n 'episode': mobj.group(3),\n })\n\n return info\n", "path": "yt_dlp/extractor/sevenplus.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport json\nimport re\n\nfrom .brightcove import BrightcoveNewIE\nfrom ..compat import (\n compat_HTTPError,\n compat_str,\n)\nfrom ..utils import (\n ExtractorError,\n try_get,\n update_url_query,\n)\n\n\nclass SevenPlusIE(BrightcoveNewIE):\n IE_NAME = '7plus'\n _VALID_URL = r'https?://(?:www\\.)?7plus\\.com\\.au/(?P<path>[^?]+\\?.*?\\bepisode-id=(?P<id>[^&#]+))'\n _TESTS = [{\n 'url': 'https://7plus.com.au/MTYS?episode-id=MTYS7-003',\n 'info_dict': {\n 
'id': 'MTYS7-003',\n 'ext': 'mp4',\n 'title': 'S7 E3 - Wind Surf',\n 'description': 'md5:29c6a69f21accda7601278f81b46483d',\n 'uploader_id': '5303576322001',\n 'upload_date': '20171201',\n 'timestamp': 1512106377,\n 'series': 'Mighty Ships',\n 'season_number': 7,\n 'episode_number': 3,\n 'episode': 'Wind Surf',\n },\n 'params': {\n 'format': 'bestvideo',\n 'skip_download': True,\n }\n }, {\n 'url': 'https://7plus.com.au/UUUU?episode-id=AUMS43-001',\n 'only_matching': True,\n }]\n\n def _real_initialize(self):\n self.token = None\n\n cookies = self._get_cookies('https://7plus.com.au')\n api_key = next((x for x in cookies if x.startswith('glt_')), '')[4:]\n if not api_key: # Cookies are signed out, skip login\n return\n\n login_resp = self._download_json(\n 'https://login.7plus.com.au/accounts.getJWT', None, 'Logging in', fatal=False,\n query={\n 'APIKey': api_key,\n 'sdk': 'js_latest',\n 'login_token': cookies[f'glt_{api_key}'].value,\n 'authMode': 'cookie',\n 'pageURL': 'https://7plus.com.au/',\n 'sdkBuild': '12471',\n 'format': 'json',\n }) or {}\n\n if 'errorMessage' in login_resp:\n self.report_warning(f'Unable to login: 7plus said: {login_resp[\"errorMessage\"]}')\n return\n id_token = login_resp.get('id_token')\n if not id_token:\n self.report_warning('Unable to login: Could not extract id token')\n return\n\n token_resp = self._download_json(\n 'https://7plus.com.au/auth/token', None, 'Getting auth token', fatal=False,\n headers={'Content-Type': 'application/json'}, data=json.dumps({\n 'idToken': id_token,\n 'platformId': 'web',\n 'regSource': '7plus',\n }).encode('utf-8')) or {}\n self.token = token_resp.get('token')\n if not self.token:\n self.report_warning('Unable to log in: Could not extract auth token')\n\n def _real_extract(self, url):\n path, episode_id = self._match_valid_url(url).groups()\n\n headers = {}\n if self.token:\n headers['Authorization'] = f'Bearer {self.token}'\n\n try:\n media = self._download_json(\n 'https://videoservice.swm.digital/playback', episode_id, query={\n 'appId': '7plus',\n 'deviceType': 'web',\n 'platformType': 'web',\n 'accountId': 5303576322001,\n 'referenceId': 'ref:' + episode_id,\n 'deliveryId': 'csai',\n 'videoType': 'vod',\n }, headers=headers)['media']\n except ExtractorError as e:\n if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:\n raise ExtractorError(self._parse_json(\n e.cause.read().decode(), episode_id)[0]['error_code'], expected=True)\n raise\n\n for source in media.get('sources', {}):\n src = source.get('src')\n if not src:\n continue\n source['src'] = update_url_query(src, {'rule': ''})\n\n info = self._parse_brightcove_metadata(media, episode_id)\n\n content = self._download_json(\n 'https://component-cdn.swm.digital/content/' + path,\n episode_id, headers={\n 'market-id': 4,\n }, fatal=False) or {}\n for item in content.get('items', {}):\n if item.get('componentData', {}).get('componentType') == 'infoPanel':\n for src_key, dst_key in [('title', 'title'), ('shortSynopsis', 'description')]:\n value = item.get(src_key)\n if value:\n info[dst_key] = value\n info['series'] = try_get(\n item, lambda x: x['seriesLogo']['name'], compat_str)\n mobj = re.search(r'^S(\\d+)\\s+E(\\d+)\\s+-\\s+(.+)$', info['title'])\n if mobj:\n info.update({\n 'season_number': int(mobj.group(1)),\n 'episode_number': int(mobj.group(2)),\n 'episode': mobj.group(3),\n })\n\n return info\n", "path": "yt_dlp/extractor/sevenplus.py"}]}
1,679
698
gh_patches_debug_25239
rasdani/github-patches
git_diff
learningequality__kolibri-1604
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- DateTimeTzFields are serialized with incorrect times Example saved this time in a `ChannelMetaDataCache` object (at around 14:00 local time) `2017-06-06 14:44:12.582879(America/Los_Angeles)` But it gets serialized as `2017-06-06T07:44:12.582-07:00` Expected is `2017-06-06T14:44:12.582-07:00` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kolibri/core/serializers.py` Content: ``` 1 from django.utils import timezone 2 from rest_framework.serializers import DateTimeField, ModelSerializer 3 from .fields import DateTimeTzField as DjangoDateTimeTzField 4 5 class DateTimeTzField(DateTimeField): 6 7 def to_internal_value(self, data): 8 data = super(DateTimeTzField, self).to_internal_value(data) 9 tz = timezone.get_current_timezone() 10 return data.astimezone(tz) 11 12 13 serializer_field_mapping = { 14 DjangoDateTimeTzField: DateTimeTzField, 15 } 16 17 serializer_field_mapping.update(ModelSerializer.serializer_field_mapping) 18 19 class KolibriModelSerializer(ModelSerializer): 20 21 serializer_field_mapping = serializer_field_mapping 22 ``` Path: `kolibri/core/fields.py` Content: ``` 1 import datetime 2 import re 3 4 import pytz 5 from django.db.models.fields import Field 6 from django.utils import timezone 7 8 date_time_format = "%Y-%m-%d %H:%M:%S.%f" 9 tz_format = "({tz})" 10 tz_regex = re.compile("\(([^\)]+)\)") 11 db_storage_string = "{date_time_string}{tz_string}" 12 13 def parse_timezonestamp(value): 14 if tz_regex.search(value): 15 tz = pytz.timezone(tz_regex.search(value).groups()[0]) 16 else: 17 tz = timezone.get_current_timezone() 18 utc_value = tz_regex.sub('', value) 19 value = datetime.datetime.strptime(utc_value, date_time_format) 20 value = timezone.make_aware(value, pytz.utc) 21 return value.astimezone(tz) 22 23 def create_timezonestamp(value): 24 if value.tzinfo: 25 tz = value.tzinfo.zone 26 else: 27 tz = timezone.get_current_timezone().zone 28 date_time_string = value.strftime(date_time_format) 29 tz_string = tz_format.format(tz=tz) 30 value = db_storage_string.format(date_time_string=date_time_string, tz_string=tz_string) 31 return value 32 33 class DateTimeTzField(Field): 34 """ 35 A field that stores datetime information as a char in this format: 36 37 %Y-%m-%d %H:%M:%S.%f(<tzinfo>) 38 39 It reads a timezone aware datetime object, and extracts the timezone zone information 40 then parses the datetime into the format above with the timezone information appended. 41 42 As this is ISO formatted, alphabetic sorting should still allow for proper queries 43 against this in the database. Mostly engineered for SQLite usage. 
44 """ 45 46 def db_type(self, connection): 47 return "varchar" 48 49 def from_db_value(self, value, expression, connection, context): 50 if value is None: 51 return value 52 return parse_timezonestamp(value) 53 54 def to_python(self, value): 55 if isinstance(value, datetime.datetime): 56 return value 57 58 if value is None: 59 return value 60 61 return parse_timezonestamp(value) 62 63 def get_prep_value(self, value): 64 # Casts datetimes into the format expected by the backend 65 if value is None: 66 return value 67 return create_timezonestamp(value) 68 69 def get_db_prep_value(self, value, connection, prepared=False): 70 if not prepared: 71 value = self.get_prep_value(value) 72 return value 73 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kolibri/core/fields.py b/kolibri/core/fields.py --- a/kolibri/core/fields.py +++ b/kolibri/core/fields.py @@ -25,7 +25,8 @@ tz = value.tzinfo.zone else: tz = timezone.get_current_timezone().zone - date_time_string = value.strftime(date_time_format) + value = timezone.make_aware(value, timezone.get_current_timezone()) + date_time_string = value.astimezone(pytz.utc).strftime(date_time_format) tz_string = tz_format.format(tz=tz) value = db_storage_string.format(date_time_string=date_time_string, tz_string=tz_string) return value diff --git a/kolibri/core/serializers.py b/kolibri/core/serializers.py --- a/kolibri/core/serializers.py +++ b/kolibri/core/serializers.py @@ -1,3 +1,5 @@ +import pytz + from django.utils import timezone from rest_framework.serializers import DateTimeField, ModelSerializer from .fields import DateTimeTzField as DjangoDateTimeTzField @@ -7,6 +9,8 @@ def to_internal_value(self, data): data = super(DateTimeTzField, self).to_internal_value(data) tz = timezone.get_current_timezone() + if not data.tzinfo: + data = timezone.make_aware(data, pytz.utc) return data.astimezone(tz)
{"golden_diff": "diff --git a/kolibri/core/fields.py b/kolibri/core/fields.py\n--- a/kolibri/core/fields.py\n+++ b/kolibri/core/fields.py\n@@ -25,7 +25,8 @@\n tz = value.tzinfo.zone\n else:\n tz = timezone.get_current_timezone().zone\n- date_time_string = value.strftime(date_time_format)\n+ value = timezone.make_aware(value, timezone.get_current_timezone())\n+ date_time_string = value.astimezone(pytz.utc).strftime(date_time_format)\n tz_string = tz_format.format(tz=tz)\n value = db_storage_string.format(date_time_string=date_time_string, tz_string=tz_string)\n return value\ndiff --git a/kolibri/core/serializers.py b/kolibri/core/serializers.py\n--- a/kolibri/core/serializers.py\n+++ b/kolibri/core/serializers.py\n@@ -1,3 +1,5 @@\n+import pytz\n+\n from django.utils import timezone\n from rest_framework.serializers import DateTimeField, ModelSerializer\n from .fields import DateTimeTzField as DjangoDateTimeTzField\n@@ -7,6 +9,8 @@\n def to_internal_value(self, data):\n data = super(DateTimeTzField, self).to_internal_value(data)\n tz = timezone.get_current_timezone()\n+ if not data.tzinfo:\n+ data = timezone.make_aware(data, pytz.utc)\n return data.astimezone(tz)\n", "issue": "DateTimeTzFields are serialized with incorrect times\nExample saved this time in a `ChannelMetaDataCache` object (at around 14:00 local time)\r\n\r\n`2017-06-06 14:44:12.582879(America/Los_Angeles)`\r\n\r\nBut it gets serialized as\r\n\r\n`2017-06-06T07:44:12.582-07:00`\r\n\r\nExpected is\r\n\r\n`2017-06-06T14:44:12.582-07:00`\r\n\n", "before_files": [{"content": "from django.utils import timezone\nfrom rest_framework.serializers import DateTimeField, ModelSerializer\nfrom .fields import DateTimeTzField as DjangoDateTimeTzField\n\nclass DateTimeTzField(DateTimeField):\n\n def to_internal_value(self, data):\n data = super(DateTimeTzField, self).to_internal_value(data)\n tz = timezone.get_current_timezone()\n return data.astimezone(tz)\n\n\nserializer_field_mapping = {\n DjangoDateTimeTzField: DateTimeTzField,\n}\n\nserializer_field_mapping.update(ModelSerializer.serializer_field_mapping)\n\nclass KolibriModelSerializer(ModelSerializer):\n\n serializer_field_mapping = serializer_field_mapping\n", "path": "kolibri/core/serializers.py"}, {"content": "import datetime\nimport re\n\nimport pytz\nfrom django.db.models.fields import Field\nfrom django.utils import timezone\n\ndate_time_format = \"%Y-%m-%d %H:%M:%S.%f\"\ntz_format = \"({tz})\"\ntz_regex = re.compile(\"\\(([^\\)]+)\\)\")\ndb_storage_string = \"{date_time_string}{tz_string}\"\n\ndef parse_timezonestamp(value):\n if tz_regex.search(value):\n tz = pytz.timezone(tz_regex.search(value).groups()[0])\n else:\n tz = timezone.get_current_timezone()\n utc_value = tz_regex.sub('', value)\n value = datetime.datetime.strptime(utc_value, date_time_format)\n value = timezone.make_aware(value, pytz.utc)\n return value.astimezone(tz)\n\ndef create_timezonestamp(value):\n if value.tzinfo:\n tz = value.tzinfo.zone\n else:\n tz = timezone.get_current_timezone().zone\n date_time_string = value.strftime(date_time_format)\n tz_string = tz_format.format(tz=tz)\n value = db_storage_string.format(date_time_string=date_time_string, tz_string=tz_string)\n return value\n\nclass DateTimeTzField(Field):\n \"\"\"\n A field that stores datetime information as a char in this format:\n\n %Y-%m-%d %H:%M:%S.%f(<tzinfo>)\n\n It reads a timezone aware datetime object, and extracts the timezone zone information\n then parses the datetime into the format above with the timezone information appended.\n\n 
As this is ISO formatted, alphabetic sorting should still allow for proper queries\n against this in the database. Mostly engineered for SQLite usage.\n \"\"\"\n\n def db_type(self, connection):\n return \"varchar\"\n\n def from_db_value(self, value, expression, connection, context):\n if value is None:\n return value\n return parse_timezonestamp(value)\n\n def to_python(self, value):\n if isinstance(value, datetime.datetime):\n return value\n\n if value is None:\n return value\n\n return parse_timezonestamp(value)\n\n def get_prep_value(self, value):\n # Casts datetimes into the format expected by the backend\n if value is None:\n return value\n return create_timezonestamp(value)\n\n def get_db_prep_value(self, value, connection, prepared=False):\n if not prepared:\n value = self.get_prep_value(value)\n return value\n", "path": "kolibri/core/fields.py"}], "after_files": [{"content": "import pytz\n\nfrom django.utils import timezone\nfrom rest_framework.serializers import DateTimeField, ModelSerializer\nfrom .fields import DateTimeTzField as DjangoDateTimeTzField\n\nclass DateTimeTzField(DateTimeField):\n\n def to_internal_value(self, data):\n data = super(DateTimeTzField, self).to_internal_value(data)\n tz = timezone.get_current_timezone()\n if not data.tzinfo:\n data = timezone.make_aware(data, pytz.utc)\n return data.astimezone(tz)\n\n\nserializer_field_mapping = {\n DjangoDateTimeTzField: DateTimeTzField,\n}\n\nserializer_field_mapping.update(ModelSerializer.serializer_field_mapping)\n\nclass KolibriModelSerializer(ModelSerializer):\n\n serializer_field_mapping = serializer_field_mapping\n", "path": "kolibri/core/serializers.py"}, {"content": "import datetime\nimport re\n\nimport pytz\nfrom django.db.models.fields import Field\nfrom django.utils import timezone\n\ndate_time_format = \"%Y-%m-%d %H:%M:%S.%f\"\ntz_format = \"({tz})\"\ntz_regex = re.compile(\"\\(([^\\)]+)\\)\")\ndb_storage_string = \"{date_time_string}{tz_string}\"\n\ndef parse_timezonestamp(value):\n if tz_regex.search(value):\n tz = pytz.timezone(tz_regex.search(value).groups()[0])\n else:\n tz = timezone.get_current_timezone()\n utc_value = tz_regex.sub('', value)\n value = datetime.datetime.strptime(utc_value, date_time_format)\n value = timezone.make_aware(value, pytz.utc)\n return value.astimezone(tz)\n\ndef create_timezonestamp(value):\n if value.tzinfo:\n tz = value.tzinfo.zone\n else:\n tz = timezone.get_current_timezone().zone\n value = timezone.make_aware(value, timezone.get_current_timezone())\n date_time_string = value.astimezone(pytz.utc).strftime(date_time_format)\n tz_string = tz_format.format(tz=tz)\n value = db_storage_string.format(date_time_string=date_time_string, tz_string=tz_string)\n return value\n\nclass DateTimeTzField(Field):\n \"\"\"\n A field that stores datetime information as a char in this format:\n\n %Y-%m-%d %H:%M:%S.%f(<tzinfo>)\n\n It reads a timezone aware datetime object, and extracts the timezone zone information\n then parses the datetime into the format above with the timezone information appended.\n\n As this is ISO formatted, alphabetic sorting should still allow for proper queries\n against this in the database. 
Mostly engineered for SQLite usage.\n \"\"\"\n\n def db_type(self, connection):\n return \"varchar\"\n\n def from_db_value(self, value, expression, connection, context):\n if value is None:\n return value\n return parse_timezonestamp(value)\n\n def to_python(self, value):\n if isinstance(value, datetime.datetime):\n return value\n\n if value is None:\n return value\n\n return parse_timezonestamp(value)\n\n def get_prep_value(self, value):\n # Casts datetimes into the format expected by the backend\n if value is None:\n return value\n return create_timezonestamp(value)\n\n def get_db_prep_value(self, value, connection, prepared=False):\n if not prepared:\n value = self.get_prep_value(value)\n return value\n", "path": "kolibri/core/fields.py"}]}
1,256
325
gh_patches_debug_2535
rasdani/github-patches
git_diff
python__peps-2229
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Don't auto-add inline links to ref section & rm if empty, per #2130 First step to implementing #2130 , as agreed with @gvanrossum and the PEP editor team. When building, don't add redundant footnotes and references entries for URLs that are already directly linked inline. This avoids an unnecessary, potentially confusing footnote for each link, and taking up additional space in the references section for no real benefit, plus simplifies the build code and should improve build time, especially for Sphinx. Furthermore, if the references section is empty (not including now-invisible link targets), remove it, as we did before (but in a more robust manner). This allows past and future PEPs to still use inline references with targets stored in the references section, while avoiding showing a now-empty references section. These are both implemented for Sphinx and the legacy builder, and I visually inspected a variety of PEPs with various cases with both builders to ensure the desired results were achieved, and there were no obvious side effects from this change. Following merging this PR, following the plan outlined in #2130 , I'll proceed with one updating the meta-PEP docs in PEP 0, PEP 1 and PEP 12 to reflect the revised policy of using standard reST links (inline or with separate targets) rather than the references section, and follow that with a PR updating the relative handful of references in the other active meta-PEPs, for consistency. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pep_sphinx_extensions/pep_processor/transforms/pep_footer.py` Content: ``` 1 import datetime 2 from pathlib import Path 3 import subprocess 4 5 from docutils import nodes 6 from docutils import transforms 7 8 9 class PEPFooter(transforms.Transform): 10 """Footer transforms for PEPs. 11 12 - Removes the References section if it is empty when rendered. 13 - Creates a link to the (GitHub) source text. 14 15 Source Link: 16 Create the link to the source file from the document source path, 17 and append the text to the end of the document. 
18 19 """ 20 21 # Uses same priority as docutils.transforms.TargetNotes 22 default_priority = 520 23 24 def apply(self) -> None: 25 pep_source_path = Path(self.document["source"]) 26 if not pep_source_path.match("pep-*"): 27 return # not a PEP file, exit early 28 29 # Iterate through sections from the end of the document 30 for section in reversed(self.document[0]): 31 if not isinstance(section, nodes.section): 32 continue 33 title_words = section[0].astext().lower().split() 34 if "references" in title_words: 35 # Remove references section if there are no displayed 36 # footnotes (it only has title & link target nodes) 37 if all(isinstance(ref_node, (nodes.title, nodes.target)) 38 for ref_node in section): 39 section.parent.remove(section) 40 break 41 42 # Add link to source text and last modified date 43 if pep_source_path.stem != "pep-0000": 44 self.document += _add_source_link(pep_source_path) 45 self.document += _add_commit_history_info(pep_source_path) 46 47 48 def _add_source_link(pep_source_path: Path) -> nodes.paragraph: 49 """Add link to source text on VCS (GitHub)""" 50 source_link = f"https://github.com/python/peps/blob/main/{pep_source_path.name}" 51 link_node = nodes.reference("", source_link, refuri=source_link) 52 return nodes.paragraph("", "Source: ", link_node) 53 54 55 def _add_commit_history_info(pep_source_path: Path) -> nodes.paragraph: 56 """Use local git history to find last modified date.""" 57 try: 58 since_epoch = LAST_MODIFIED_TIMES[pep_source_path.name] 59 except KeyError: 60 return nodes.paragraph() 61 62 iso_time = datetime.datetime.utcfromtimestamp(since_epoch).isoformat(sep=" ") 63 commit_link = f"https://github.com/python/peps/commits/main/{pep_source_path.name}" 64 link_node = nodes.reference("", f"{iso_time} GMT", refuri=commit_link) 65 return nodes.paragraph("", "Last modified: ", link_node) 66 67 68 def _get_last_modified_timestamps(): 69 # get timestamps and changed files from all commits (without paging results) 70 args = ["git", "--no-pager", "log", "--format=#%at", "--name-only"] 71 with subprocess.Popen(args, stdout=subprocess.PIPE) as process: 72 all_modified = process.stdout.read().decode("utf-8") 73 process.stdout.close() 74 if process.wait(): # non-zero return code 75 return {} 76 77 # set up the dictionary with the *current* files 78 last_modified = {path.name: 0 for path in Path().glob("pep-*") if path.suffix in {".txt", ".rst"}} 79 80 # iterate through newest to oldest, updating per file timestamps 81 change_sets = all_modified.removeprefix("#").split("#") 82 for change_set in change_sets: 83 timestamp, files = change_set.split("\n", 1) 84 for file in files.strip().split("\n"): 85 if file.startswith("pep-") and file[-3:] in {"txt", "rst"}: 86 if last_modified.get(file) == 0: 87 try: 88 last_modified[file] = float(timestamp) 89 except ValueError: 90 pass # if float conversion fails 91 92 return last_modified 93 94 95 LAST_MODIFIED_TIMES = _get_last_modified_timestamps() 96 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pep_sphinx_extensions/pep_processor/transforms/pep_footer.py b/pep_sphinx_extensions/pep_processor/transforms/pep_footer.py --- a/pep_sphinx_extensions/pep_processor/transforms/pep_footer.py +++ b/pep_sphinx_extensions/pep_processor/transforms/pep_footer.py @@ -18,8 +18,8 @@ """ - # Uses same priority as docutils.transforms.TargetNotes - default_priority = 520 + # Set low priority so ref targets aren't removed before they are needed + default_priority = 999 def apply(self) -> None: pep_source_path = Path(self.document["source"])
{"golden_diff": "diff --git a/pep_sphinx_extensions/pep_processor/transforms/pep_footer.py b/pep_sphinx_extensions/pep_processor/transforms/pep_footer.py\n--- a/pep_sphinx_extensions/pep_processor/transforms/pep_footer.py\n+++ b/pep_sphinx_extensions/pep_processor/transforms/pep_footer.py\n@@ -18,8 +18,8 @@\n \n \"\"\"\n \n- # Uses same priority as docutils.transforms.TargetNotes\n- default_priority = 520\n+ # Set low priority so ref targets aren't removed before they are needed\n+ default_priority = 999\n \n def apply(self) -> None:\n pep_source_path = Path(self.document[\"source\"])\n", "issue": "Don't auto-add inline links to ref section & rm if empty, per #2130\nFirst step to implementing #2130 , as agreed with @gvanrossum and the PEP editor team.\r\n\r\nWhen building, don't add redundant footnotes and references entries for URLs that are already directly linked inline. This avoids an unnecessary, potentially confusing footnote for each link, and taking up additional space in the references section for no real benefit, plus simplifies the build code and should improve build time, especially for Sphinx. Furthermore, if the references section is empty (not including now-invisible link targets), remove it, as we did before (but in a more robust manner). This allows past and future PEPs to still use inline references with targets stored in the references section, while avoiding showing a now-empty references section.\r\n\r\nThese are both implemented for Sphinx and the legacy builder, and I visually inspected a variety of PEPs with various cases with both builders to ensure the desired results were achieved, and there were no obvious side effects from this change.\r\n\r\nFollowing merging this PR, following the plan outlined in #2130 , I'll proceed with one updating the meta-PEP docs in PEP 0, PEP 1 and PEP 12 to reflect the revised policy of using standard reST links (inline or with separate targets) rather than the references section, and follow that with a PR updating the relative handful of references in the other active meta-PEPs, for consistency.\n", "before_files": [{"content": "import datetime\nfrom pathlib import Path\nimport subprocess\n\nfrom docutils import nodes\nfrom docutils import transforms\n\n\nclass PEPFooter(transforms.Transform):\n \"\"\"Footer transforms for PEPs.\n\n - Removes the References section if it is empty when rendered.\n - Creates a link to the (GitHub) source text.\n\n Source Link:\n Create the link to the source file from the document source path,\n and append the text to the end of the document.\n\n \"\"\"\n\n # Uses same priority as docutils.transforms.TargetNotes\n default_priority = 520\n\n def apply(self) -> None:\n pep_source_path = Path(self.document[\"source\"])\n if not pep_source_path.match(\"pep-*\"):\n return # not a PEP file, exit early\n\n # Iterate through sections from the end of the document\n for section in reversed(self.document[0]):\n if not isinstance(section, nodes.section):\n continue\n title_words = section[0].astext().lower().split()\n if \"references\" in title_words:\n # Remove references section if there are no displayed\n # footnotes (it only has title & link target nodes)\n if all(isinstance(ref_node, (nodes.title, nodes.target))\n for ref_node in section):\n section.parent.remove(section)\n break\n\n # Add link to source text and last modified date\n if pep_source_path.stem != \"pep-0000\":\n self.document += _add_source_link(pep_source_path)\n self.document += _add_commit_history_info(pep_source_path)\n\n\ndef 
_add_source_link(pep_source_path: Path) -> nodes.paragraph:\n \"\"\"Add link to source text on VCS (GitHub)\"\"\"\n source_link = f\"https://github.com/python/peps/blob/main/{pep_source_path.name}\"\n link_node = nodes.reference(\"\", source_link, refuri=source_link)\n return nodes.paragraph(\"\", \"Source: \", link_node)\n\n\ndef _add_commit_history_info(pep_source_path: Path) -> nodes.paragraph:\n \"\"\"Use local git history to find last modified date.\"\"\"\n try:\n since_epoch = LAST_MODIFIED_TIMES[pep_source_path.name]\n except KeyError:\n return nodes.paragraph()\n\n iso_time = datetime.datetime.utcfromtimestamp(since_epoch).isoformat(sep=\" \")\n commit_link = f\"https://github.com/python/peps/commits/main/{pep_source_path.name}\"\n link_node = nodes.reference(\"\", f\"{iso_time} GMT\", refuri=commit_link)\n return nodes.paragraph(\"\", \"Last modified: \", link_node)\n\n\ndef _get_last_modified_timestamps():\n # get timestamps and changed files from all commits (without paging results)\n args = [\"git\", \"--no-pager\", \"log\", \"--format=#%at\", \"--name-only\"]\n with subprocess.Popen(args, stdout=subprocess.PIPE) as process:\n all_modified = process.stdout.read().decode(\"utf-8\")\n process.stdout.close()\n if process.wait(): # non-zero return code\n return {}\n\n # set up the dictionary with the *current* files\n last_modified = {path.name: 0 for path in Path().glob(\"pep-*\") if path.suffix in {\".txt\", \".rst\"}}\n\n # iterate through newest to oldest, updating per file timestamps\n change_sets = all_modified.removeprefix(\"#\").split(\"#\")\n for change_set in change_sets:\n timestamp, files = change_set.split(\"\\n\", 1)\n for file in files.strip().split(\"\\n\"):\n if file.startswith(\"pep-\") and file[-3:] in {\"txt\", \"rst\"}:\n if last_modified.get(file) == 0:\n try:\n last_modified[file] = float(timestamp)\n except ValueError:\n pass # if float conversion fails\n\n return last_modified\n\n\nLAST_MODIFIED_TIMES = _get_last_modified_timestamps()\n", "path": "pep_sphinx_extensions/pep_processor/transforms/pep_footer.py"}], "after_files": [{"content": "import datetime\nfrom pathlib import Path\nimport subprocess\n\nfrom docutils import nodes\nfrom docutils import transforms\n\n\nclass PEPFooter(transforms.Transform):\n \"\"\"Footer transforms for PEPs.\n\n - Removes the References section if it is empty when rendered.\n - Creates a link to the (GitHub) source text.\n\n Source Link:\n Create the link to the source file from the document source path,\n and append the text to the end of the document.\n\n \"\"\"\n\n # Set low priority so ref targets aren't removed before they are needed\n default_priority = 999\n\n def apply(self) -> None:\n pep_source_path = Path(self.document[\"source\"])\n if not pep_source_path.match(\"pep-*\"):\n return # not a PEP file, exit early\n\n # Iterate through sections from the end of the document\n for section in reversed(self.document[0]):\n if not isinstance(section, nodes.section):\n continue\n title_words = section[0].astext().lower().split()\n if \"references\" in title_words:\n # Remove references section if there are no displayed\n # footnotes (it only has title & link target nodes)\n if all(isinstance(ref_node, (nodes.title, nodes.target))\n for ref_node in section):\n section.parent.remove(section)\n break\n\n # Add link to source text and last modified date\n if pep_source_path.stem != \"pep-0000\":\n self.document += _add_source_link(pep_source_path)\n self.document += _add_commit_history_info(pep_source_path)\n\n\ndef 
_add_source_link(pep_source_path: Path) -> nodes.paragraph:\n \"\"\"Add link to source text on VCS (GitHub)\"\"\"\n source_link = f\"https://github.com/python/peps/blob/main/{pep_source_path.name}\"\n link_node = nodes.reference(\"\", source_link, refuri=source_link)\n return nodes.paragraph(\"\", \"Source: \", link_node)\n\n\ndef _add_commit_history_info(pep_source_path: Path) -> nodes.paragraph:\n \"\"\"Use local git history to find last modified date.\"\"\"\n try:\n since_epoch = LAST_MODIFIED_TIMES[pep_source_path.name]\n except KeyError:\n return nodes.paragraph()\n\n iso_time = datetime.datetime.utcfromtimestamp(since_epoch).isoformat(sep=\" \")\n commit_link = f\"https://github.com/python/peps/commits/main/{pep_source_path.name}\"\n link_node = nodes.reference(\"\", f\"{iso_time} GMT\", refuri=commit_link)\n return nodes.paragraph(\"\", \"Last modified: \", link_node)\n\n\ndef _get_last_modified_timestamps():\n # get timestamps and changed files from all commits (without paging results)\n args = [\"git\", \"--no-pager\", \"log\", \"--format=#%at\", \"--name-only\"]\n with subprocess.Popen(args, stdout=subprocess.PIPE) as process:\n all_modified = process.stdout.read().decode(\"utf-8\")\n process.stdout.close()\n if process.wait(): # non-zero return code\n return {}\n\n # set up the dictionary with the *current* files\n last_modified = {path.name: 0 for path in Path().glob(\"pep-*\") if path.suffix in {\".txt\", \".rst\"}}\n\n # iterate through newest to oldest, updating per file timestamps\n change_sets = all_modified.removeprefix(\"#\").split(\"#\")\n for change_set in change_sets:\n timestamp, files = change_set.split(\"\\n\", 1)\n for file in files.strip().split(\"\\n\"):\n if file.startswith(\"pep-\") and file[-3:] in {\"txt\", \"rst\"}:\n if last_modified.get(file) == 0:\n try:\n last_modified[file] = float(timestamp)\n except ValueError:\n pass # if float conversion fails\n\n return last_modified\n\n\nLAST_MODIFIED_TIMES = _get_last_modified_timestamps()\n", "path": "pep_sphinx_extensions/pep_processor/transforms/pep_footer.py"}]}
1,604
165
gh_patches_debug_10524
rasdani/github-patches
git_diff
Gallopsled__pwntools-2191
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- readthedocs.org builds are broken The docs fail to build for a while, but differently since https://github.com/Gallopsled/pwntools/pull/2179. They're still built using Python 2.7, which had problems installing rpyc and is now missing the toml package before running the setup.py. https://readthedocs.org/projects/pwntools/builds/ You could switch to Python 3 or try adding the `toml; python_version<'3.4'` package to the `docs/requirements.txt` file for a starter? I think only a maintainer with access to the configs can fix this. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pwn/toplevel.py` Content: ``` 1 # Get all the modules from pwnlib 2 import collections 3 import logging 4 import math 5 import operator 6 import os 7 import platform 8 import re 9 import socks 10 import signal 11 import string 12 import struct 13 import subprocess 14 import sys 15 import tempfile 16 import threading 17 import time 18 19 import colored_traceback 20 from pprint import pprint 21 22 import pwnlib 23 from pwnlib import * 24 from pwnlib.asm import * 25 from pwnlib.context import Thread 26 from pwnlib.context import context, LocalContext 27 from pwnlib.dynelf import DynELF 28 from pwnlib.encoders import * 29 from pwnlib.elf.corefile import Core, Corefile, Coredump 30 from pwnlib.elf.elf import ELF, load 31 from pwnlib.encoders import * 32 from pwnlib.exception import PwnlibException 33 from pwnlib.gdb import attach, debug_assembly, debug_shellcode 34 from pwnlib.filepointer import * 35 from pwnlib.filesystem import * 36 from pwnlib.flag import * 37 from pwnlib.fmtstr import FmtStr, fmtstr_payload, fmtstr_split 38 from pwnlib.log import getLogger 39 from pwnlib.memleak import MemLeak, RelativeMemLeak 40 from pwnlib.regsort import * 41 from pwnlib.replacements import * 42 from pwnlib.rop import ROP 43 from pwnlib.rop.call import AppendedArgument 44 from pwnlib.rop.srop import SigreturnFrame 45 from pwnlib.rop.ret2dlresolve import Ret2dlresolvePayload 46 from pwnlib.runner import * 47 from pwnlib.term.readline import str_input 48 from pwnlib.timeout import Timeout 49 from pwnlib.tubes.listen import listen 50 from pwnlib.tubes.process import process, PTY, PIPE, STDOUT 51 from pwnlib.tubes.remote import remote, tcp, udp, connect 52 from pwnlib.tubes.serialtube import serialtube 53 from pwnlib.tubes.server import server 54 from pwnlib.tubes.ssh import ssh 55 from pwnlib.tubes.tube import tube 56 from pwnlib.ui import * 57 from pwnlib.util import crc 58 from pwnlib.util import iters 59 from pwnlib.util import net 60 from pwnlib.util import proc 61 from pwnlib.util import safeeval 62 from pwnlib.util.crc import BitPolynom 63 from pwnlib.util.cyclic import * 64 from pwnlib.util.fiddling import * 65 from pwnlib.util.getdents import * 66 from pwnlib.util.hashes import * 67 from pwnlib.util.lists import * 68 from pwnlib.util.misc import * 69 from pwnlib.util.packing import * 70 from pwnlib.util.proc import pidof 71 from pwnlib.util.sh_string import sh_string, sh_prepare, sh_command_with 72 from pwnlib.util.splash import * 73 from pwnlib.util.web import * 74 75 # Promote these modules, so that "from pwn import *" will let you access them 76 77 from six.moves import cPickle as pickle, cStringIO as StringIO 78 from six import BytesIO 79 80 log = getLogger("pwnlib.exploit") 81 error = log.error 82 warning = log.warning 
83 warn = log.warning 84 info = log.info 85 debug = log.debug 86 success = log.success 87 88 colored_traceback.add_hook() 89 90 # Equivalence with the default behavior of "from import *" 91 # __all__ = [x for x in tuple(globals()) if not x.startswith('_')] 92 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pwn/toplevel.py b/pwn/toplevel.py --- a/pwn/toplevel.py +++ b/pwn/toplevel.py @@ -16,7 +16,6 @@ import threading import time -import colored_traceback from pprint import pprint import pwnlib @@ -85,7 +84,12 @@ debug = log.debug success = log.success -colored_traceback.add_hook() +try: + import colored_traceback +except ImportError: + pass +else: + colored_traceback.add_hook() # Equivalence with the default behavior of "from import *" # __all__ = [x for x in tuple(globals()) if not x.startswith('_')]
{"golden_diff": "diff --git a/pwn/toplevel.py b/pwn/toplevel.py\n--- a/pwn/toplevel.py\n+++ b/pwn/toplevel.py\n@@ -16,7 +16,6 @@\n import threading\n import time\n \n-import colored_traceback\n from pprint import pprint\n \n import pwnlib\n@@ -85,7 +84,12 @@\n debug = log.debug\n success = log.success\n \n-colored_traceback.add_hook()\n+try:\n+ import colored_traceback\n+except ImportError:\n+ pass\n+else:\n+ colored_traceback.add_hook()\n \n # Equivalence with the default behavior of \"from import *\"\n # __all__ = [x for x in tuple(globals()) if not x.startswith('_')]\n", "issue": "readthedocs.org builds are broken\nThe docs fail to build for a while, but differently since https://github.com/Gallopsled/pwntools/pull/2179. They're still built using Python 2.7, which had problems installing rpyc and is now missing the toml package before running the setup.py.\r\n\r\nhttps://readthedocs.org/projects/pwntools/builds/\r\n\r\nYou could switch to Python 3 or try adding the `toml; python_version<'3.4'` package to the `docs/requirements.txt` file for a starter? I think only a maintainer with access to the configs can fix this.\n", "before_files": [{"content": "# Get all the modules from pwnlib\nimport collections\nimport logging\nimport math\nimport operator\nimport os\nimport platform\nimport re\nimport socks\nimport signal\nimport string\nimport struct\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\n\nimport colored_traceback\nfrom pprint import pprint\n\nimport pwnlib\nfrom pwnlib import *\nfrom pwnlib.asm import *\nfrom pwnlib.context import Thread\nfrom pwnlib.context import context, LocalContext\nfrom pwnlib.dynelf import DynELF\nfrom pwnlib.encoders import *\nfrom pwnlib.elf.corefile import Core, Corefile, Coredump\nfrom pwnlib.elf.elf import ELF, load\nfrom pwnlib.encoders import *\nfrom pwnlib.exception import PwnlibException\nfrom pwnlib.gdb import attach, debug_assembly, debug_shellcode\nfrom pwnlib.filepointer import *\nfrom pwnlib.filesystem import *\nfrom pwnlib.flag import *\nfrom pwnlib.fmtstr import FmtStr, fmtstr_payload, fmtstr_split\nfrom pwnlib.log import getLogger\nfrom pwnlib.memleak import MemLeak, RelativeMemLeak\nfrom pwnlib.regsort import *\nfrom pwnlib.replacements import *\nfrom pwnlib.rop import ROP\nfrom pwnlib.rop.call import AppendedArgument\nfrom pwnlib.rop.srop import SigreturnFrame\nfrom pwnlib.rop.ret2dlresolve import Ret2dlresolvePayload\nfrom pwnlib.runner import *\nfrom pwnlib.term.readline import str_input\nfrom pwnlib.timeout import Timeout\nfrom pwnlib.tubes.listen import listen\nfrom pwnlib.tubes.process import process, PTY, PIPE, STDOUT\nfrom pwnlib.tubes.remote import remote, tcp, udp, connect\nfrom pwnlib.tubes.serialtube import serialtube\nfrom pwnlib.tubes.server import server\nfrom pwnlib.tubes.ssh import ssh\nfrom pwnlib.tubes.tube import tube\nfrom pwnlib.ui import *\nfrom pwnlib.util import crc\nfrom pwnlib.util import iters\nfrom pwnlib.util import net\nfrom pwnlib.util import proc\nfrom pwnlib.util import safeeval\nfrom pwnlib.util.crc import BitPolynom\nfrom pwnlib.util.cyclic import *\nfrom pwnlib.util.fiddling import *\nfrom pwnlib.util.getdents import *\nfrom pwnlib.util.hashes import *\nfrom pwnlib.util.lists import *\nfrom pwnlib.util.misc import *\nfrom pwnlib.util.packing import *\nfrom pwnlib.util.proc import pidof\nfrom pwnlib.util.sh_string import sh_string, sh_prepare, sh_command_with\nfrom pwnlib.util.splash import *\nfrom pwnlib.util.web import *\n\n# Promote these modules, so that \"from pwn 
import *\" will let you access them\n\nfrom six.moves import cPickle as pickle, cStringIO as StringIO\nfrom six import BytesIO\n\nlog = getLogger(\"pwnlib.exploit\")\nerror = log.error\nwarning = log.warning\nwarn = log.warning\ninfo = log.info\ndebug = log.debug\nsuccess = log.success\n\ncolored_traceback.add_hook()\n\n# Equivalence with the default behavior of \"from import *\"\n# __all__ = [x for x in tuple(globals()) if not x.startswith('_')]\n", "path": "pwn/toplevel.py"}], "after_files": [{"content": "# Get all the modules from pwnlib\nimport collections\nimport logging\nimport math\nimport operator\nimport os\nimport platform\nimport re\nimport socks\nimport signal\nimport string\nimport struct\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\n\nfrom pprint import pprint\n\nimport pwnlib\nfrom pwnlib import *\nfrom pwnlib.asm import *\nfrom pwnlib.context import Thread\nfrom pwnlib.context import context, LocalContext\nfrom pwnlib.dynelf import DynELF\nfrom pwnlib.encoders import *\nfrom pwnlib.elf.corefile import Core, Corefile, Coredump\nfrom pwnlib.elf.elf import ELF, load\nfrom pwnlib.encoders import *\nfrom pwnlib.exception import PwnlibException\nfrom pwnlib.gdb import attach, debug_assembly, debug_shellcode\nfrom pwnlib.filepointer import *\nfrom pwnlib.filesystem import *\nfrom pwnlib.flag import *\nfrom pwnlib.fmtstr import FmtStr, fmtstr_payload, fmtstr_split\nfrom pwnlib.log import getLogger\nfrom pwnlib.memleak import MemLeak, RelativeMemLeak\nfrom pwnlib.regsort import *\nfrom pwnlib.replacements import *\nfrom pwnlib.rop import ROP\nfrom pwnlib.rop.call import AppendedArgument\nfrom pwnlib.rop.srop import SigreturnFrame\nfrom pwnlib.rop.ret2dlresolve import Ret2dlresolvePayload\nfrom pwnlib.runner import *\nfrom pwnlib.term.readline import str_input\nfrom pwnlib.timeout import Timeout\nfrom pwnlib.tubes.listen import listen\nfrom pwnlib.tubes.process import process, PTY, PIPE, STDOUT\nfrom pwnlib.tubes.remote import remote, tcp, udp, connect\nfrom pwnlib.tubes.serialtube import serialtube\nfrom pwnlib.tubes.server import server\nfrom pwnlib.tubes.ssh import ssh\nfrom pwnlib.tubes.tube import tube\nfrom pwnlib.ui import *\nfrom pwnlib.util import crc\nfrom pwnlib.util import iters\nfrom pwnlib.util import net\nfrom pwnlib.util import proc\nfrom pwnlib.util import safeeval\nfrom pwnlib.util.crc import BitPolynom\nfrom pwnlib.util.cyclic import *\nfrom pwnlib.util.fiddling import *\nfrom pwnlib.util.getdents import *\nfrom pwnlib.util.hashes import *\nfrom pwnlib.util.lists import *\nfrom pwnlib.util.misc import *\nfrom pwnlib.util.packing import *\nfrom pwnlib.util.proc import pidof\nfrom pwnlib.util.sh_string import sh_string, sh_prepare, sh_command_with\nfrom pwnlib.util.splash import *\nfrom pwnlib.util.web import *\n\n# Promote these modules, so that \"from pwn import *\" will let you access them\n\nfrom six.moves import cPickle as pickle, cStringIO as StringIO\nfrom six import BytesIO\n\nlog = getLogger(\"pwnlib.exploit\")\nerror = log.error\nwarning = log.warning\nwarn = log.warning\ninfo = log.info\ndebug = log.debug\nsuccess = log.success\n\ntry:\n import colored_traceback\nexcept ImportError:\n pass\nelse:\n colored_traceback.add_hook()\n\n# Equivalence with the default behavior of \"from import *\"\n# __all__ = [x for x in tuple(globals()) if not x.startswith('_')]\n", "path": "pwn/toplevel.py"}]}
1,314
158
gh_patches_debug_11600
rasdani/github-patches
git_diff
nautobot__nautobot-4260
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Set NAUTOBOT_INSTALLATION_METRICS_ENABLED=false in the dockerfile dev stage <!-- NOTE: This template is for use by maintainers only. Please do not submit an issue using this template unless you have been specifically asked to do so. --> ### Proposed Changes This should propagate to all of the app dev instances since they base off of nautobot dev images. Make sure to unset this in the final stage <!-- Provide justification for the proposed change(s). --> ### Justification We don't want to send metrics for dev environments --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `development/nautobot_config.py` Content: ``` 1 """Nautobot development configuration file.""" 2 import os 3 4 from nautobot.core.settings import * # noqa: F403 5 from nautobot.core.settings_funcs import is_truthy 6 7 # 8 # Debugging defaults to True rather than False for the development environment 9 # 10 DEBUG = is_truthy(os.getenv("NAUTOBOT_DEBUG", "True")) 11 12 # Django Debug Toolbar - enabled only when debugging 13 if DEBUG: 14 if "debug_toolbar" not in INSTALLED_APPS: # noqa: F405 15 INSTALLED_APPS.append("debug_toolbar") # noqa: F405 16 if "debug_toolbar.middleware.DebugToolbarMiddleware" not in MIDDLEWARE: # noqa: F405 17 MIDDLEWARE.insert(0, "debug_toolbar.middleware.DebugToolbarMiddleware") # noqa: F405 18 # By default the toolbar only displays when the request is coming from one of INTERNAL_IPS. 19 # For the Docker dev environment, we don't know in advance what that IP may be, so override to skip that check 20 DEBUG_TOOLBAR_CONFIG = {"SHOW_TOOLBAR_CALLBACK": lambda _request: DEBUG} 21 22 # 23 # Logging for the development environment, taking into account the redefinition of DEBUG above 24 # 25 26 LOG_LEVEL = "DEBUG" if DEBUG else "INFO" 27 LOGGING["loggers"]["nautobot"]["handlers"] = ["verbose_console" if DEBUG else "normal_console"] # noqa: F405 28 LOGGING["loggers"]["nautobot"]["level"] = LOG_LEVEL # noqa: F405 29 30 # 31 # Plugins 32 # 33 34 PLUGINS = [ 35 "example_plugin", 36 ] 37 38 39 # 40 # Development Environment for SSO 41 # Configure `invoke.yml` based on example for SSO development environment 42 # 43 44 # OIDC Dev ENV 45 if is_truthy(os.getenv("ENABLE_OIDC", "False")): 46 import requests 47 48 AUTHENTICATION_BACKENDS = ( 49 "social_core.backends.keycloak.KeycloakOAuth2", 50 "nautobot.core.authentication.ObjectPermissionBackend", 51 ) 52 SOCIAL_AUTH_KEYCLOAK_KEY = "nautobot" 53 SOCIAL_AUTH_KEYCLOAK_SECRET = "7b1c3527-8702-4742-af69-2b74ee5742e8" 54 SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY = requests.get("http://keycloak:8087/realms/nautobot/", timeout=15).json()[ 55 "public_key" 56 ] 57 SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL = "http://localhost:8087/realms/nautobot/protocol/openid-connect/auth" 58 SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL = "http://keycloak:8087/realms/nautobot/protocol/openid-connect/token" 59 SOCIAL_AUTH_KEYCLOAK_VERIFY_SSL = False 60 61 METRICS_ENABLED = True 62 63 CELERY_WORKER_PROMETHEUS_PORTS = [8080] 64 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/development/nautobot_config.py b/development/nautobot_config.py --- a/development/nautobot_config.py +++ b/development/nautobot_config.py @@ -19,6 +19,9 @@ # For the Docker dev environment, we don't know in advance what that IP may be, so override to skip that check DEBUG_TOOLBAR_CONFIG = {"SHOW_TOOLBAR_CALLBACK": lambda _request: DEBUG} +# Do *not* send anonymized install metrics when post_upgrade or send_installation_metrics management commands are run +INSTALLATION_METRICS_ENABLED = is_truthy(os.getenv("NAUTOBOT_INSTALLATION_METRICS_ENABLED", "False")) + # # Logging for the development environment, taking into account the redefinition of DEBUG above #
{"golden_diff": "diff --git a/development/nautobot_config.py b/development/nautobot_config.py\n--- a/development/nautobot_config.py\n+++ b/development/nautobot_config.py\n@@ -19,6 +19,9 @@\n # For the Docker dev environment, we don't know in advance what that IP may be, so override to skip that check\n DEBUG_TOOLBAR_CONFIG = {\"SHOW_TOOLBAR_CALLBACK\": lambda _request: DEBUG}\n \n+# Do *not* send anonymized install metrics when post_upgrade or send_installation_metrics management commands are run\n+INSTALLATION_METRICS_ENABLED = is_truthy(os.getenv(\"NAUTOBOT_INSTALLATION_METRICS_ENABLED\", \"False\"))\n+\n #\n # Logging for the development environment, taking into account the redefinition of DEBUG above\n #\n", "issue": "Set NAUTOBOT_INSTALLATION_METRICS_ENABLED=false in the dockerfile dev stage\n<!--\r\n NOTE: This template is for use by maintainers only. Please do not submit\r\n an issue using this template unless you have been specifically asked to\r\n do so.\r\n-->\r\n### Proposed Changes\r\n\r\nThis should propagate to all of the app dev instances since they base off of nautobot dev images. Make sure to unset this in the final stage\r\n\r\n<!-- Provide justification for the proposed change(s). -->\r\n### Justification\r\n\r\nWe don't want to send metrics for dev environments\n", "before_files": [{"content": "\"\"\"Nautobot development configuration file.\"\"\"\nimport os\n\nfrom nautobot.core.settings import * # noqa: F403\nfrom nautobot.core.settings_funcs import is_truthy\n\n#\n# Debugging defaults to True rather than False for the development environment\n#\nDEBUG = is_truthy(os.getenv(\"NAUTOBOT_DEBUG\", \"True\"))\n\n# Django Debug Toolbar - enabled only when debugging\nif DEBUG:\n if \"debug_toolbar\" not in INSTALLED_APPS: # noqa: F405\n INSTALLED_APPS.append(\"debug_toolbar\") # noqa: F405\n if \"debug_toolbar.middleware.DebugToolbarMiddleware\" not in MIDDLEWARE: # noqa: F405\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\") # noqa: F405\n # By default the toolbar only displays when the request is coming from one of INTERNAL_IPS.\n # For the Docker dev environment, we don't know in advance what that IP may be, so override to skip that check\n DEBUG_TOOLBAR_CONFIG = {\"SHOW_TOOLBAR_CALLBACK\": lambda _request: DEBUG}\n\n#\n# Logging for the development environment, taking into account the redefinition of DEBUG above\n#\n\nLOG_LEVEL = \"DEBUG\" if DEBUG else \"INFO\"\nLOGGING[\"loggers\"][\"nautobot\"][\"handlers\"] = [\"verbose_console\" if DEBUG else \"normal_console\"] # noqa: F405\nLOGGING[\"loggers\"][\"nautobot\"][\"level\"] = LOG_LEVEL # noqa: F405\n\n#\n# Plugins\n#\n\nPLUGINS = [\n \"example_plugin\",\n]\n\n\n#\n# Development Environment for SSO\n# Configure `invoke.yml` based on example for SSO development environment\n#\n\n# OIDC Dev ENV\nif is_truthy(os.getenv(\"ENABLE_OIDC\", \"False\")):\n import requests\n\n AUTHENTICATION_BACKENDS = (\n \"social_core.backends.keycloak.KeycloakOAuth2\",\n \"nautobot.core.authentication.ObjectPermissionBackend\",\n )\n SOCIAL_AUTH_KEYCLOAK_KEY = \"nautobot\"\n SOCIAL_AUTH_KEYCLOAK_SECRET = \"7b1c3527-8702-4742-af69-2b74ee5742e8\"\n SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY = requests.get(\"http://keycloak:8087/realms/nautobot/\", timeout=15).json()[\n \"public_key\"\n ]\n SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL = \"http://localhost:8087/realms/nautobot/protocol/openid-connect/auth\"\n SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL = \"http://keycloak:8087/realms/nautobot/protocol/openid-connect/token\"\n 
SOCIAL_AUTH_KEYCLOAK_VERIFY_SSL = False\n\nMETRICS_ENABLED = True\n\nCELERY_WORKER_PROMETHEUS_PORTS = [8080]\n", "path": "development/nautobot_config.py"}], "after_files": [{"content": "\"\"\"Nautobot development configuration file.\"\"\"\nimport os\n\nfrom nautobot.core.settings import * # noqa: F403\nfrom nautobot.core.settings_funcs import is_truthy\n\n#\n# Debugging defaults to True rather than False for the development environment\n#\nDEBUG = is_truthy(os.getenv(\"NAUTOBOT_DEBUG\", \"True\"))\n\n# Django Debug Toolbar - enabled only when debugging\nif DEBUG:\n if \"debug_toolbar\" not in INSTALLED_APPS: # noqa: F405\n INSTALLED_APPS.append(\"debug_toolbar\") # noqa: F405\n if \"debug_toolbar.middleware.DebugToolbarMiddleware\" not in MIDDLEWARE: # noqa: F405\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\") # noqa: F405\n # By default the toolbar only displays when the request is coming from one of INTERNAL_IPS.\n # For the Docker dev environment, we don't know in advance what that IP may be, so override to skip that check\n DEBUG_TOOLBAR_CONFIG = {\"SHOW_TOOLBAR_CALLBACK\": lambda _request: DEBUG}\n\n# Do *not* send anonymized install metrics when post_upgrade or send_installation_metrics management commands are run\nINSTALLATION_METRICS_ENABLED = is_truthy(os.getenv(\"NAUTOBOT_INSTALLATION_METRICS_ENABLED\", \"False\"))\n\n#\n# Logging for the development environment, taking into account the redefinition of DEBUG above\n#\n\nLOG_LEVEL = \"DEBUG\" if DEBUG else \"INFO\"\nLOGGING[\"loggers\"][\"nautobot\"][\"handlers\"] = [\"verbose_console\" if DEBUG else \"normal_console\"] # noqa: F405\nLOGGING[\"loggers\"][\"nautobot\"][\"level\"] = LOG_LEVEL # noqa: F405\n\n#\n# Plugins\n#\n\nPLUGINS = [\n \"example_plugin\",\n]\n\n\n#\n# Development Environment for SSO\n# Configure `invoke.yml` based on example for SSO development environment\n#\n\n# OIDC Dev ENV\nif is_truthy(os.getenv(\"ENABLE_OIDC\", \"False\")):\n import requests\n\n AUTHENTICATION_BACKENDS = (\n \"social_core.backends.keycloak.KeycloakOAuth2\",\n \"nautobot.core.authentication.ObjectPermissionBackend\",\n )\n SOCIAL_AUTH_KEYCLOAK_KEY = \"nautobot\"\n SOCIAL_AUTH_KEYCLOAK_SECRET = \"7b1c3527-8702-4742-af69-2b74ee5742e8\"\n SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY = requests.get(\"http://keycloak:8087/realms/nautobot/\", timeout=15).json()[\n \"public_key\"\n ]\n SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL = \"http://localhost:8087/realms/nautobot/protocol/openid-connect/auth\"\n SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL = \"http://keycloak:8087/realms/nautobot/protocol/openid-connect/token\"\n SOCIAL_AUTH_KEYCLOAK_VERIFY_SSL = False\n\nMETRICS_ENABLED = True\n\nCELERY_WORKER_PROMETHEUS_PORTS = [8080]\n", "path": "development/nautobot_config.py"}]}
1,135
166
gh_patches_debug_7368
rasdani/github-patches
git_diff
learningequality__kolibri-9397
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fresh Kolibri dev environment fails to install properly ## Observed behavior I took these steps: - Clone Kolibri to fresh local repo - Ensure `develop` is checked out - `python -m venv venv` - `source venv/bin/activate` - `pip install -r` every file in the `requirements/` directory - `pip install -e .` - `yarn install` - `yarn devserver` Then I get the error noted below. Seems like Django isn't being installed correctly or something. I've copied the `venv` from another worktree in and the branch works as expected until I `pip install...` everything again. There was a recent bump to Morango in the requirements.txt but I can't imagine why that would cause this problem. ## Errors and logs <!-- Relevant logs from: * the command line * ~/.kolibri/logs/kolibri.txt * the browser console Please wrap errors in triple backticks for clean formatting like this: ``` 01:10 info: something happened 01:12 error: something bad happened ``` --> ``` ╰─ yarn devserver yarn run v1.22.15 $ run-p python-devserver lint-frontend:watch:format hashi-dev watch > kolibri-root@0.12.0-dev.1 hashi-dev > yarn workspace hashi run dev > kolibri-root@0.12.0-dev.1 watch > kolibri-tools build dev --file ./build_tools/build_plugins.txt --cache > kolibri-root@0.12.0-dev.1 python-devserver > kolibri start --debug --foreground --port=8000 --settings=kolibri.deployment.default.settings.dev > kolibri-root@0.12.0-dev.1 lint-frontend:watch:format > yarn run lint-frontend --monitor --write $ kolibri-tools lint --pattern '{kolibri*/**/assets,packages,build_tools}/**/*.{js,vue,scss,less,css}' --ignore '**/dist/**,**/node_modules/**,**/static/**,**/kolibri-core-for-export/**' --monitor --write $ yarn run build-base --mode=development --watch $ webpack --config ./webpack.config.js --mode=development --watch INFO Option DEBUG in section [Server] being overridden by environment variable KOLIBRI_DEBUG INFO Option DEBUG_LOG_DATABASE in section [Server] being overridden by environment variable KOLIBRI_DEBUG_LOG_DATABASE INFO Option RUN_MODE in section [Deployment] being overridden by environment variable KOLIBRI_RUN_MODE /home/jacob/kill-this-kolibri/venv/lib/python3.6/site-packages/psycopg2/__init__.py:144: UserWarning: The psycopg2 wheel package will be renamed from release 2.8; in order to keep installing from binary please use "pip install psycopg2-binary" instead. For details see: <http://initd.org/psycopg/docs/install.html#binary-install-from-pypi>. 
""") Error: Traceback (most recent call last): File "/home/jacob/kill-this-kolibri/kolibri/utils/cli.py", line 193, in invoke initialize(**get_initialize_params()) File "/home/jacob/kill-this-kolibri/kolibri/utils/main.py", line 279, in initialize _setup_django() File "/home/jacob/kill-this-kolibri/kolibri/utils/main.py", line 153, in _setup_django django.setup() File "/home/jacob/kill-this-kolibri/venv/lib/python3.6/site-packages/django/__init__.py", line 27, in setup apps.populate(settings.INSTALLED_APPS) File "/home/jacob/kill-this-kolibri/venv/lib/python3.6/site-packages/django/apps/registry.py", line 85, in populate app_config = AppConfig.create(entry) File "/home/jacob/kill-this-kolibri/venv/lib/python3.6/site-packages/django/apps/config.py", line 94, in create module = import_module(entry) File "/home/jacob/.pyenv/versions/3.6.12/lib/python3.6/importlib/__init__.py", line 126, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "<frozen importlib._bootstrap>", line 994, in _gcd_import File "<frozen importlib._bootstrap>", line 971, in _find_and_load File "<frozen importlib._bootstrap>", line 953, in _find_and_load_unlocked ModuleNotFoundError: No module named 'django_extensions' ERROR: "python-devserver" exited with 1. error Command failed with exit code 1. info Visit https://yarnpkg.com/en/docs/cli/run for documentation about this command. ``` ## Expected behavior <!-- Description of what behavior was expected but did not occur --> Server should start up without issue. ## User-facing consequences… <!-- Implications and real-world consequences for learners, coaches, admins, and other users of the application --> Devs cannot create new working local environments. ## Steps to reproduce <!-- Precise steps that someone else can follow in order to see this behavior --> See the above. ## Context <!-- Tell us about your environment, including: * Kolibri version * Operating system * Browser --> `develop` Fedora --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `kolibri/deployment/default/settings/dev.py` Content: ``` 1 from __future__ import absolute_import 2 from __future__ import print_function 3 from __future__ import unicode_literals 4 5 import os 6 7 from .base import * # noqa isort:skip @UnusedWildImport 8 9 DEBUG = True 10 11 # Settings might be tuples, so switch to lists 12 INSTALLED_APPS = list(INSTALLED_APPS) + ["drf_yasg", "django_extensions"] # noqa F405 13 webpack_middleware = "kolibri.core.webpack.middleware.WebpackErrorHandler" 14 no_login_popup_middleware = ( 15 "kolibri.core.auth.middleware.XhrPreventLoginPromptMiddleware" 16 ) 17 MIDDLEWARE = list(MIDDLEWARE) + [ # noqa F405 18 webpack_middleware, 19 no_login_popup_middleware, 20 ] 21 22 INTERNAL_IPS = ["127.0.0.1"] 23 24 ROOT_URLCONF = "kolibri.deployment.default.dev_urls" 25 26 DEVELOPER_MODE = True 27 os.environ.update({"KOLIBRI_DEVELOPER_MODE": "True"}) 28 29 try: 30 process_cache = CACHES["process_cache"] # noqa F405 31 except KeyError: 32 process_cache = None 33 34 # Create a memcache for each cache 35 CACHES = { 36 key: {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"} 37 for key in CACHES # noqa F405 38 } 39 40 if process_cache: 41 CACHES["process_cache"] = process_cache 42 43 44 REST_FRAMEWORK = { 45 "UNAUTHENTICATED_USER": "kolibri.core.auth.models.KolibriAnonymousUser", 46 "DEFAULT_AUTHENTICATION_CLASSES": [ 47 # Activate basic auth for external API testing tools 48 "rest_framework.authentication.BasicAuthentication", 49 "rest_framework.authentication.SessionAuthentication", 50 ], 51 "DEFAULT_RENDERER_CLASSES": ( 52 "rest_framework.renderers.JSONRenderer", 53 "rest_framework.renderers.BrowsableAPIRenderer", 54 ), 55 "EXCEPTION_HANDLER": "kolibri.core.utils.exception_handler.custom_exception_handler", 56 } 57 58 SWAGGER_SETTINGS = {"DEFAULT_INFO": "kolibri.deployment.default.dev_urls.api_info"} 59 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kolibri/deployment/default/settings/dev.py b/kolibri/deployment/default/settings/dev.py --- a/kolibri/deployment/default/settings/dev.py +++ b/kolibri/deployment/default/settings/dev.py @@ -9,7 +9,7 @@ DEBUG = True # Settings might be tuples, so switch to lists -INSTALLED_APPS = list(INSTALLED_APPS) + ["drf_yasg", "django_extensions"] # noqa F405 +INSTALLED_APPS = list(INSTALLED_APPS) + ["drf_yasg"] # noqa F405 webpack_middleware = "kolibri.core.webpack.middleware.WebpackErrorHandler" no_login_popup_middleware = ( "kolibri.core.auth.middleware.XhrPreventLoginPromptMiddleware"
{"golden_diff": "diff --git a/kolibri/deployment/default/settings/dev.py b/kolibri/deployment/default/settings/dev.py\n--- a/kolibri/deployment/default/settings/dev.py\n+++ b/kolibri/deployment/default/settings/dev.py\n@@ -9,7 +9,7 @@\n DEBUG = True\n \n # Settings might be tuples, so switch to lists\n-INSTALLED_APPS = list(INSTALLED_APPS) + [\"drf_yasg\", \"django_extensions\"] # noqa F405\n+INSTALLED_APPS = list(INSTALLED_APPS) + [\"drf_yasg\"] # noqa F405\n webpack_middleware = \"kolibri.core.webpack.middleware.WebpackErrorHandler\"\n no_login_popup_middleware = (\n \"kolibri.core.auth.middleware.XhrPreventLoginPromptMiddleware\"\n", "issue": "Fresh Kolibri dev environment fails to install properly\n## Observed behavior\r\n\r\nI took these steps:\r\n\r\n- Clone Kolibri to fresh local repo\r\n- Ensure `develop` is checked out\r\n- `python -m venv venv`\r\n- `source venv/bin/activate` \r\n- `pip install -r` every file in the `requirements/` directory\r\n- `pip install -e .`\r\n- `yarn install`\r\n- `yarn devserver`\r\n\r\nThen I get the error noted below. Seems like Django isn't being installed correctly or something. I've copied the `venv` from another worktree in and the branch works as expected until I `pip install...` everything again.\r\n\r\nThere was a recent bump to Morango in the requirements.txt but I can't imagine why that would cause this problem.\r\n\r\n## Errors and logs\r\n<!--\r\nRelevant logs from:\r\n * the command line\r\n * ~/.kolibri/logs/kolibri.txt\r\n * the browser console\r\n\r\nPlease wrap errors in triple backticks for clean formatting like this:\r\n```\r\n01:10 info: something happened\r\n01:12 error: something bad happened\r\n```\r\n-->\r\n\r\n```\r\n\u2570\u2500 yarn devserver \r\nyarn run v1.22.15\r\n$ run-p python-devserver lint-frontend:watch:format hashi-dev watch\r\n\r\n> kolibri-root@0.12.0-dev.1 hashi-dev\r\n> yarn workspace hashi run dev\r\n\r\n\r\n> kolibri-root@0.12.0-dev.1 watch\r\n> kolibri-tools build dev --file ./build_tools/build_plugins.txt --cache\r\n\r\n\r\n> kolibri-root@0.12.0-dev.1 python-devserver\r\n> kolibri start --debug --foreground --port=8000 --settings=kolibri.deployment.default.settings.dev\r\n\r\n\r\n> kolibri-root@0.12.0-dev.1 lint-frontend:watch:format\r\n> yarn run lint-frontend --monitor --write\r\n\r\n$ kolibri-tools lint --pattern '{kolibri*/**/assets,packages,build_tools}/**/*.{js,vue,scss,less,css}' --ignore '**/dist/**,**/node_modules/**,**/static/**,**/kolibri-core-for-export/**' --monitor --write\r\n$ yarn run build-base --mode=development --watch\r\n$ webpack --config ./webpack.config.js --mode=development --watch\r\nINFO Option DEBUG in section [Server] being overridden by environment variable KOLIBRI_DEBUG\r\nINFO Option DEBUG_LOG_DATABASE in section [Server] being overridden by environment variable KOLIBRI_DEBUG_LOG_DATABASE\r\nINFO Option RUN_MODE in section [Deployment] being overridden by environment variable KOLIBRI_RUN_MODE\r\n/home/jacob/kill-this-kolibri/venv/lib/python3.6/site-packages/psycopg2/__init__.py:144: UserWarning: The psycopg2 wheel package will be renamed from release 2.8; in order to keep installing from binary please use \"pip install psycopg2-binary\" instead. 
For details see: <http://initd.org/psycopg/docs/install.html#binary-install-from-pypi>.\r\n \"\"\")\r\nError: Traceback (most recent call last):\r\n File \"/home/jacob/kill-this-kolibri/kolibri/utils/cli.py\", line 193, in invoke\r\n initialize(**get_initialize_params())\r\n File \"/home/jacob/kill-this-kolibri/kolibri/utils/main.py\", line 279, in initialize\r\n _setup_django()\r\n File \"/home/jacob/kill-this-kolibri/kolibri/utils/main.py\", line 153, in _setup_django\r\n django.setup()\r\n File \"/home/jacob/kill-this-kolibri/venv/lib/python3.6/site-packages/django/__init__.py\", line 27, in setup\r\n apps.populate(settings.INSTALLED_APPS)\r\n File \"/home/jacob/kill-this-kolibri/venv/lib/python3.6/site-packages/django/apps/registry.py\", line 85, in populate\r\n app_config = AppConfig.create(entry)\r\n File \"/home/jacob/kill-this-kolibri/venv/lib/python3.6/site-packages/django/apps/config.py\", line 94, in create\r\n module = import_module(entry)\r\n File \"/home/jacob/.pyenv/versions/3.6.12/lib/python3.6/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 953, in _find_and_load_unlocked\r\nModuleNotFoundError: No module named 'django_extensions'\r\n\r\nERROR: \"python-devserver\" exited with 1.\r\nerror Command failed with exit code 1.\r\ninfo Visit https://yarnpkg.com/en/docs/cli/run for documentation about this command.\r\n```\r\n\r\n## Expected behavior\r\n<!--\r\nDescription of what behavior was expected but did not occur\r\n-->\r\n\r\nServer should start up without issue.\r\n\r\n## User-facing consequences\u2026\r\n\r\n<!--\r\nImplications and real-world consequences for learners, coaches, admins, and other users of the application\r\n-->\r\n\r\nDevs cannot create new working local environments.\r\n\r\n## Steps to reproduce\r\n<!--\r\nPrecise steps that someone else can follow in order to see this behavior\r\n-->\r\n\r\nSee the above.\r\n\r\n## Context\r\n<!--\r\nTell us about your environment, including:\r\n * Kolibri version\r\n * Operating system\r\n * Browser\r\n-->\r\n\r\n`develop`\r\nFedora\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\n\nfrom .base import * # noqa isort:skip @UnusedWildImport\n\nDEBUG = True\n\n# Settings might be tuples, so switch to lists\nINSTALLED_APPS = list(INSTALLED_APPS) + [\"drf_yasg\", \"django_extensions\"] # noqa F405\nwebpack_middleware = \"kolibri.core.webpack.middleware.WebpackErrorHandler\"\nno_login_popup_middleware = (\n \"kolibri.core.auth.middleware.XhrPreventLoginPromptMiddleware\"\n)\nMIDDLEWARE = list(MIDDLEWARE) + [ # noqa F405\n webpack_middleware,\n no_login_popup_middleware,\n]\n\nINTERNAL_IPS = [\"127.0.0.1\"]\n\nROOT_URLCONF = \"kolibri.deployment.default.dev_urls\"\n\nDEVELOPER_MODE = True\nos.environ.update({\"KOLIBRI_DEVELOPER_MODE\": \"True\"})\n\ntry:\n process_cache = CACHES[\"process_cache\"] # noqa F405\nexcept KeyError:\n process_cache = None\n\n# Create a memcache for each cache\nCACHES = {\n key: {\"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\"}\n for key in CACHES # noqa F405\n}\n\nif process_cache:\n CACHES[\"process_cache\"] = process_cache\n\n\nREST_FRAMEWORK = {\n \"UNAUTHENTICATED_USER\": 
\"kolibri.core.auth.models.KolibriAnonymousUser\",\n \"DEFAULT_AUTHENTICATION_CLASSES\": [\n # Activate basic auth for external API testing tools\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ],\n \"DEFAULT_RENDERER_CLASSES\": (\n \"rest_framework.renderers.JSONRenderer\",\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n ),\n \"EXCEPTION_HANDLER\": \"kolibri.core.utils.exception_handler.custom_exception_handler\",\n}\n\nSWAGGER_SETTINGS = {\"DEFAULT_INFO\": \"kolibri.deployment.default.dev_urls.api_info\"}\n", "path": "kolibri/deployment/default/settings/dev.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\n\nfrom .base import * # noqa isort:skip @UnusedWildImport\n\nDEBUG = True\n\n# Settings might be tuples, so switch to lists\nINSTALLED_APPS = list(INSTALLED_APPS) + [\"drf_yasg\"] # noqa F405\nwebpack_middleware = \"kolibri.core.webpack.middleware.WebpackErrorHandler\"\nno_login_popup_middleware = (\n \"kolibri.core.auth.middleware.XhrPreventLoginPromptMiddleware\"\n)\nMIDDLEWARE = list(MIDDLEWARE) + [ # noqa F405\n webpack_middleware,\n no_login_popup_middleware,\n]\n\nINTERNAL_IPS = [\"127.0.0.1\"]\n\nROOT_URLCONF = \"kolibri.deployment.default.dev_urls\"\n\nDEVELOPER_MODE = True\nos.environ.update({\"KOLIBRI_DEVELOPER_MODE\": \"True\"})\n\ntry:\n process_cache = CACHES[\"process_cache\"] # noqa F405\nexcept KeyError:\n process_cache = None\n\n# Create a memcache for each cache\nCACHES = {\n key: {\"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\"}\n for key in CACHES # noqa F405\n}\n\nif process_cache:\n CACHES[\"process_cache\"] = process_cache\n\n\nREST_FRAMEWORK = {\n \"UNAUTHENTICATED_USER\": \"kolibri.core.auth.models.KolibriAnonymousUser\",\n \"DEFAULT_AUTHENTICATION_CLASSES\": [\n # Activate basic auth for external API testing tools\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ],\n \"DEFAULT_RENDERER_CLASSES\": (\n \"rest_framework.renderers.JSONRenderer\",\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n ),\n \"EXCEPTION_HANDLER\": \"kolibri.core.utils.exception_handler.custom_exception_handler\",\n}\n\nSWAGGER_SETTINGS = {\"DEFAULT_INFO\": \"kolibri.deployment.default.dev_urls.api_info\"}\n", "path": "kolibri/deployment/default/settings/dev.py"}]}
2,032
168
gh_patches_debug_14541
rasdani/github-patches
git_diff
pyinstaller__pyinstaller-5006
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Impossible to build with matplotlib 3.3 on Python 3.7 and 3.8 ## Description of the issue Trying a simple program main.py ```python import matplotlib.pyplot as plt plt.plot([0,1,2], [0,1,1]) ``` Running the application created with pyinstaller --hiddenimport='pkg_resources.py2_warn' main.py I get: ``` Traceback (most recent call last): File "main.py", line 3, in <module> File "<frozen importlib._bootstrap>", line 983, in _find_and_load File "<frozen importlib._bootstrap>", line 967, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 677, in _load_unlocked File "c:\users\***\appdata\local\programs\python\python37\lib\site-packages\PyInstaller\loader\pyimod03_importers.py", line 623, in exec_module File "site-packages\matplotlib\__init__.py", line 898, in <module> File "site-packages\matplotlib\cbook\__init__.py", line 480, in _get_data_path File "site-packages\matplotlib\__init__.py", line 239, in wrapper File "site-packages\matplotlib\__init__.py", line 534, in get_data_path File "site-packages\matplotlib\__init__.py", line 239, in wrapper File "site-packages\matplotlib\__init__.py", line 563, in _get_data_path NameError: name 'defaultParams' is not defined [3064] Failed to execute script main ``` ## Context information (for bug reports) * Output of `pyinstaller --version`: ```3.6 and 4.0-dev0``` * Version of Python: 3.7 and 3.8 * Platform: Windows (english 10) * Did you also try this on another platform? No ## What solves it Reverting to matplotlib 3.2.2 resolve the issue (temporarily). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `PyInstaller/hooks/hook-matplotlib.py` Content: ``` 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2013-2020, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License (version 2 5 # or later) with exception for distributing the bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 # 9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception) 10 #----------------------------------------------------------------------------- 11 12 13 from PyInstaller.utils.hooks import exec_statement 14 15 mpl_data_dir = exec_statement( 16 "import matplotlib; print(matplotlib._get_data_path())") 17 18 datas = [ 19 (mpl_data_dir, "mpl-data"), 20 ] 21 ``` Path: `PyInstaller/hooks/rthooks/pyi_rth_mpldata.py` Content: ``` 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2013-2020, PyInstaller Development Team. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 # 9 # SPDX-License-Identifier: Apache-2.0 10 #----------------------------------------------------------------------------- 11 12 13 import os 14 import sys 15 16 os.environ["MATPLOTLIBDATA"] = os.path.join(sys._MEIPASS, "mpl-data") 17 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/PyInstaller/hooks/hook-matplotlib.py b/PyInstaller/hooks/hook-matplotlib.py --- a/PyInstaller/hooks/hook-matplotlib.py +++ b/PyInstaller/hooks/hook-matplotlib.py @@ -16,5 +16,5 @@ "import matplotlib; print(matplotlib._get_data_path())") datas = [ - (mpl_data_dir, "mpl-data"), + (mpl_data_dir, "matplotlib/mpl-data"), ] diff --git a/PyInstaller/hooks/rthooks/pyi_rth_mpldata.py b/PyInstaller/hooks/rthooks/pyi_rth_mpldata.py deleted file mode 100644 --- a/PyInstaller/hooks/rthooks/pyi_rth_mpldata.py +++ /dev/null @@ -1,16 +0,0 @@ -#----------------------------------------------------------------------------- -# Copyright (c) 2013-2020, PyInstaller Development Team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# -# The full license is in the file COPYING.txt, distributed with this software. -# -# SPDX-License-Identifier: Apache-2.0 -#----------------------------------------------------------------------------- - - -import os -import sys - -os.environ["MATPLOTLIBDATA"] = os.path.join(sys._MEIPASS, "mpl-data")
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-matplotlib.py b/PyInstaller/hooks/hook-matplotlib.py\n--- a/PyInstaller/hooks/hook-matplotlib.py\n+++ b/PyInstaller/hooks/hook-matplotlib.py\n@@ -16,5 +16,5 @@\n \"import matplotlib; print(matplotlib._get_data_path())\")\n \n datas = [\n- (mpl_data_dir, \"mpl-data\"),\n+ (mpl_data_dir, \"matplotlib/mpl-data\"),\n ]\ndiff --git a/PyInstaller/hooks/rthooks/pyi_rth_mpldata.py b/PyInstaller/hooks/rthooks/pyi_rth_mpldata.py\ndeleted file mode 100644\n--- a/PyInstaller/hooks/rthooks/pyi_rth_mpldata.py\n+++ /dev/null\n@@ -1,16 +0,0 @@\n-#-----------------------------------------------------------------------------\n-# Copyright (c) 2013-2020, PyInstaller Development Team.\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-#\n-# The full license is in the file COPYING.txt, distributed with this software.\n-#\n-# SPDX-License-Identifier: Apache-2.0\n-#-----------------------------------------------------------------------------\n-\n-\n-import os\n-import sys\n-\n-os.environ[\"MATPLOTLIBDATA\"] = os.path.join(sys._MEIPASS, \"mpl-data\")\n", "issue": "Impossible to build with matplotlib 3.3 on Python 3.7 and 3.8\n## Description of the issue\r\n\r\nTrying a simple program main.py\r\n```python\r\nimport matplotlib.pyplot as plt\r\n\r\nplt.plot([0,1,2], [0,1,1])\r\n```\r\n\r\nRunning the application created with pyinstaller --hiddenimport='pkg_resources.py2_warn' main.py\r\n\r\nI get:\r\n\r\n```\r\nTraceback (most recent call last): File \"main.py\", line 3, in <module> File \"<frozen importlib._bootstrap>\", line 983, in _find_and_load File \"<frozen importlib._bootstrap>\", line 967, in _find_and_load_unlocked File \"<frozen importlib._bootstrap>\", line 677, in _load_unlocked File \"c:\\users\\***\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\PyInstaller\\loader\\pyimod03_importers.py\", line 623, in exec_module File \"site-packages\\matplotlib\\__init__.py\", line 898, in <module> File \"site-packages\\matplotlib\\cbook\\__init__.py\", line 480, in _get_data_path File \"site-packages\\matplotlib\\__init__.py\", line 239, in wrapper File \"site-packages\\matplotlib\\__init__.py\", line 534, in get_data_path File \"site-packages\\matplotlib\\__init__.py\", line 239, in wrapper File \"site-packages\\matplotlib\\__init__.py\", line 563, in _get_data_path NameError: name 'defaultParams' is not defined [3064] Failed to execute script main\r\n```\r\n## Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```3.6 and 4.0-dev0```\r\n* Version of Python: 3.7 and 3.8\r\n* Platform: Windows (english 10)\r\n* Did you also try this on another platform? 
No\r\n\r\n## What solves it\r\n\r\nReverting to matplotlib 3.2.2 resolve the issue (temporarily).\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\n\nfrom PyInstaller.utils.hooks import exec_statement\n\nmpl_data_dir = exec_statement(\n \"import matplotlib; print(matplotlib._get_data_path())\")\n\ndatas = [\n (mpl_data_dir, \"mpl-data\"),\n]\n", "path": "PyInstaller/hooks/hook-matplotlib.py"}, {"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2020, PyInstaller Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: Apache-2.0\n#-----------------------------------------------------------------------------\n\n\nimport os\nimport sys\n\nos.environ[\"MATPLOTLIBDATA\"] = os.path.join(sys._MEIPASS, \"mpl-data\")\n", "path": "PyInstaller/hooks/rthooks/pyi_rth_mpldata.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\n\nfrom PyInstaller.utils.hooks import exec_statement\n\nmpl_data_dir = exec_statement(\n \"import matplotlib; print(matplotlib._get_data_path())\")\n\ndatas = [\n (mpl_data_dir, \"matplotlib/mpl-data\"),\n]\n", "path": "PyInstaller/hooks/hook-matplotlib.py"}, {"content": null, "path": "PyInstaller/hooks/rthooks/pyi_rth_mpldata.py"}]}
1,113
317
gh_patches_debug_16772
rasdani/github-patches
git_diff
saulpw__visidata-1584
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ZSH completion fails with invalid option definition I'm using v2.10.2 installed via the [latest Arch package](https://archlinux.org/packages/community/any/visidata/). Visidata shell completion in my ZSH is currently broken. ``` $ vd <tab> _arguments:comparguments:327: invalid option definition: --fmt_expand_list[format str to use for names of columns expanded from list (colname, index) (default: %s[%s])]:str ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dev/zsh-completion.py` Content: ``` 1 #!/usr/bin/env python 2 from __future__ import unicode_literals 3 4 import os 5 from os.path import dirname as dirn 6 import sys 7 import re 8 9 sys.path.insert(0, dirn(dirn((os.path.abspath(__file__))))) 10 from visidata import vd 11 from visidata.main import option_aliases 12 13 ZSH_COMPLETION_FILE = "_visidata" 14 ZSH_COMPLETION_TEMPLATE = "dev/zsh-completion.in" 15 pat_class = re.compile("'(.*)'") 16 pat_select = re.compile("^\([^)]*\)") 17 18 19 def generate_completion(opt): 20 prefix = "--" + opt.name 21 shortnames = [key for key, value in option_aliases.items() if value[0] == opt.name] 22 if len(shortnames): 23 if len(shortnames[0]) == 1: 24 shortname = "-" + shortnames[0] 25 else: 26 shortname = "--" + shortnames[0] 27 prefix = "{" + f"{shortname},{prefix}" + "}" 28 if isinstance(opt.value, bool): 29 completion = "" 30 else: 31 completion = ":" + pat_class.findall(str(opt.value.__class__))[0] 32 if opt.name in ["play", "output", "visidata_dir", "config"]: 33 completion += ":_files" 34 elif opt.name in ["plugins_url", "motd_url"]: 35 completion += ":_urls" 36 helpstr = opt.helpstr.replace("[", "\\[").replace("]", "\\]") 37 selections = pat_select.findall(helpstr) 38 if len(selections): 39 completion += f":{selections[0].replace('/', ' ')}" 40 # TODO: use `zstyle ':completion:*' extra-verbose true` 41 # to control the display of default value 42 helpstr = helpstr + f" (default: {opt.value})" 43 return f"{prefix}'[{helpstr}]{completion}'" 44 45 46 flags = [generate_completion(vd._options[opt]["default"]) for opt in vd._options] 47 48 with open(ZSH_COMPLETION_TEMPLATE) as f: 49 template = f.read() 50 51 template = template.replace("{{flags}}", " \\\n ".join(flags)) 52 53 with open(ZSH_COMPLETION_FILE, "w") as f: 54 f.write(template) 55 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dev/zsh-completion.py b/dev/zsh-completion.py --- a/dev/zsh-completion.py +++ b/dev/zsh-completion.py @@ -33,13 +33,14 @@ completion += ":_files" elif opt.name in ["plugins_url", "motd_url"]: completion += ":_urls" - helpstr = opt.helpstr.replace("[", "\\[").replace("]", "\\]") + helpstr = opt.helpstr selections = pat_select.findall(helpstr) if len(selections): completion += f":{selections[0].replace('/', ' ')}" # TODO: use `zstyle ':completion:*' extra-verbose true` # to control the display of default value helpstr = helpstr + f" (default: {opt.value})" + helpstr = helpstr.replace("[", "\\[").replace("]", "\\]") return f"{prefix}'[{helpstr}]{completion}'"
{"golden_diff": "diff --git a/dev/zsh-completion.py b/dev/zsh-completion.py\n--- a/dev/zsh-completion.py\n+++ b/dev/zsh-completion.py\n@@ -33,13 +33,14 @@\n completion += \":_files\"\n elif opt.name in [\"plugins_url\", \"motd_url\"]:\n completion += \":_urls\"\n- helpstr = opt.helpstr.replace(\"[\", \"\\\\[\").replace(\"]\", \"\\\\]\")\n+ helpstr = opt.helpstr\n selections = pat_select.findall(helpstr)\n if len(selections):\n completion += f\":{selections[0].replace('/', ' ')}\"\n # TODO: use `zstyle ':completion:*' extra-verbose true`\n # to control the display of default value\n helpstr = helpstr + f\" (default: {opt.value})\"\n+ helpstr = helpstr.replace(\"[\", \"\\\\[\").replace(\"]\", \"\\\\]\")\n return f\"{prefix}'[{helpstr}]{completion}'\"\n", "issue": "ZSH completion fails with invalid option definition\nI'm using v2.10.2 installed via the [latest Arch package](https://archlinux.org/packages/community/any/visidata/). Visidata shell completion in my ZSH is currently broken.\r\n\r\n```\r\n$ vd <tab>\r\n_arguments:comparguments:327: invalid option definition: --fmt_expand_list[format str to use for names of columns expanded from list (colname, index) (default: %s[%s])]:str\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\n\nimport os\nfrom os.path import dirname as dirn\nimport sys\nimport re\n\nsys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))\nfrom visidata import vd\nfrom visidata.main import option_aliases\n\nZSH_COMPLETION_FILE = \"_visidata\"\nZSH_COMPLETION_TEMPLATE = \"dev/zsh-completion.in\"\npat_class = re.compile(\"'(.*)'\")\npat_select = re.compile(\"^\\([^)]*\\)\")\n\n\ndef generate_completion(opt):\n prefix = \"--\" + opt.name\n shortnames = [key for key, value in option_aliases.items() if value[0] == opt.name]\n if len(shortnames):\n if len(shortnames[0]) == 1:\n shortname = \"-\" + shortnames[0]\n else:\n shortname = \"--\" + shortnames[0]\n prefix = \"{\" + f\"{shortname},{prefix}\" + \"}\"\n if isinstance(opt.value, bool):\n completion = \"\"\n else:\n completion = \":\" + pat_class.findall(str(opt.value.__class__))[0]\n if opt.name in [\"play\", \"output\", \"visidata_dir\", \"config\"]:\n completion += \":_files\"\n elif opt.name in [\"plugins_url\", \"motd_url\"]:\n completion += \":_urls\"\n helpstr = opt.helpstr.replace(\"[\", \"\\\\[\").replace(\"]\", \"\\\\]\")\n selections = pat_select.findall(helpstr)\n if len(selections):\n completion += f\":{selections[0].replace('/', ' ')}\"\n # TODO: use `zstyle ':completion:*' extra-verbose true`\n # to control the display of default value\n helpstr = helpstr + f\" (default: {opt.value})\"\n return f\"{prefix}'[{helpstr}]{completion}'\"\n\n\nflags = [generate_completion(vd._options[opt][\"default\"]) for opt in vd._options]\n\nwith open(ZSH_COMPLETION_TEMPLATE) as f:\n template = f.read()\n\ntemplate = template.replace(\"{{flags}}\", \" \\\\\\n \".join(flags))\n\nwith open(ZSH_COMPLETION_FILE, \"w\") as f:\n f.write(template)\n", "path": "dev/zsh-completion.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\n\nimport os\nfrom os.path import dirname as dirn\nimport sys\nimport re\n\nsys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))\nfrom visidata import vd\nfrom visidata.main import option_aliases\n\nZSH_COMPLETION_FILE = \"_visidata\"\nZSH_COMPLETION_TEMPLATE = \"dev/zsh-completion.in\"\npat_class = re.compile(\"'(.*)'\")\npat_select = re.compile(\"^\\([^)]*\\)\")\n\n\ndef 
generate_completion(opt):\n prefix = \"--\" + opt.name\n shortnames = [key for key, value in option_aliases.items() if value[0] == opt.name]\n if len(shortnames):\n if len(shortnames[0]) == 1:\n shortname = \"-\" + shortnames[0]\n else:\n shortname = \"--\" + shortnames[0]\n prefix = \"{\" + f\"{shortname},{prefix}\" + \"}\"\n if isinstance(opt.value, bool):\n completion = \"\"\n else:\n completion = \":\" + pat_class.findall(str(opt.value.__class__))[0]\n if opt.name in [\"play\", \"output\", \"visidata_dir\", \"config\"]:\n completion += \":_files\"\n elif opt.name in [\"plugins_url\", \"motd_url\"]:\n completion += \":_urls\"\n helpstr = opt.helpstr\n selections = pat_select.findall(helpstr)\n if len(selections):\n completion += f\":{selections[0].replace('/', ' ')}\"\n # TODO: use `zstyle ':completion:*' extra-verbose true`\n # to control the display of default value\n helpstr = helpstr + f\" (default: {opt.value})\"\n helpstr = helpstr.replace(\"[\", \"\\\\[\").replace(\"]\", \"\\\\]\")\n return f\"{prefix}'[{helpstr}]{completion}'\"\n\n\nflags = [generate_completion(vd._options[opt][\"default\"]) for opt in vd._options]\n\nwith open(ZSH_COMPLETION_TEMPLATE) as f:\n template = f.read()\n\ntemplate = template.replace(\"{{flags}}\", \" \\\\\\n \".join(flags))\n\nwith open(ZSH_COMPLETION_FILE, \"w\") as f:\n f.write(template)\n", "path": "dev/zsh-completion.py"}]}
948
212
gh_patches_debug_8893
rasdani/github-patches
git_diff
vacanza__python-holidays-806
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Swaziland deprecation warning Resolves #793. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `holidays/__init__.py` Content: ``` 1 # python-holidays 2 # --------------- 3 # A fast, efficient Python library for generating country, province and state 4 # specific sets of holidays on the fly. It aims to make determining whether a 5 # specific date is a holiday as fast and flexible as possible. 6 # 7 # Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022 8 # ryanss <ryanssdev@icloud.com> (c) 2014-2017 9 # Website: https://github.com/dr-prodigy/python-holidays 10 # License: MIT (see LICENSE file) 11 from holidays.constants import ( 12 MON, 13 TUE, 14 WED, 15 THU, 16 FRI, 17 SAT, 18 SUN, 19 WEEKEND, 20 JAN, 21 FEB, 22 MAR, 23 APR, 24 MAY, 25 JUN, 26 JUL, 27 AUG, 28 SEP, 29 OCT, 30 NOV, 31 DEC, 32 ) 33 from holidays.countries import * 34 from holidays.financial import * 35 from holidays.holiday_base import * # * import required for IDE docstrings 36 from holidays.utils import ( 37 CountryHoliday, 38 country_holidays, 39 financial_holidays, 40 list_supported_countries, 41 list_supported_financial, 42 ) 43 44 __version__ = "0.17" 45 ``` Path: `holidays/countries/eswatini.py` Content: ``` 1 # python-holidays 2 # --------------- 3 # A fast, efficient Python library for generating country, province and state 4 # specific sets of holidays on the fly. It aims to make determining whether a 5 # specific date is a holiday as fast and flexible as possible. 6 # 7 # Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022 8 # ryanss <ryanssdev@icloud.com> (c) 2014-2017 9 # Website: https://github.com/dr-prodigy/python-holidays 10 # License: MIT (see LICENSE file) 11 12 import warnings 13 from datetime import date 14 15 from dateutil.easter import easter 16 from dateutil.relativedelta import relativedelta as rd 17 18 from holidays.constants import SUN, JAN, APR, MAY, JUL, SEP, DEC 19 from holidays.holiday_base import HolidayBase 20 21 22 class Eswatini(HolidayBase): 23 """ 24 https://swazilii.org/sz/legislation/act/1938/71 25 https://www.officeholidays.com/countries/swaziland 26 """ 27 28 country = "SZ" 29 30 def _populate(self, year): 31 super()._populate(year) 32 33 # Observed since 1938 34 if year > 1938: 35 self[date(year, JAN, 1)] = "New Year's Day" 36 37 e = easter(year) 38 good_friday = e - rd(days=2) 39 easter_monday = e + rd(days=1) 40 ascension_day = e + rd(days=39) 41 self[good_friday] = "Good Friday" 42 self[easter_monday] = "Easter Monday" 43 self[ascension_day] = "Ascension Day" 44 45 if year > 1968: 46 self[date(year, APR, 25)] = "National Flag Day" 47 48 if year > 1982: 49 # https://www.officeholidays.com/holidays/swaziland/birthday-of-late-king-sobhuza 50 self[date(year, JUL, 22)] = "Birthday of Late King Sobhuza" 51 52 if year > 1986: 53 # https://www.officeholidays.com/holidays/swaziland/birthday-of-king-mswati-iii 54 self[date(year, APR, 19)] = "King's Birthday" 55 56 self[date(year, MAY, 1)] = "Worker's Day" 57 self[date(year, SEP, 6)] = "Independence Day" 58 self[date(year, DEC, 25)] = "Christmas Day" 59 self[date(year, DEC, 26)] = "Boxing Day" 60 61 # Once-off public holidays 62 y2k = "Y2K changeover" 63 64 if year == 1999: 65 # https://mg.co.za/article/1999-12-09-swaziland-declares-bank-holidays/ 66 self[date(1999, DEC, 31)] = y2k 67 if year == 2000: 68 
self[date(2000, JAN, 3)] = y2k 69 70 # As of 2021/1/1, whenever a public holiday falls on a 71 # Sunday 72 # it rolls over to the following Monday 73 for k, v in list(self.items()): 74 75 if self.observed and k.weekday() == SUN and k.year == year: 76 add_days = 1 77 while self.get(k + rd(days=add_days)) is not None: 78 add_days += 1 79 self[k + rd(days=add_days)] = v + " (Day Off)" 80 81 82 class Swaziland(Eswatini): 83 warnings.warn( 84 "Swaziland is deprecated, use Eswatini instead.", 85 DeprecationWarning, 86 ) 87 pass 88 89 90 class SZ(Eswatini): 91 pass 92 93 94 class SZW(Eswatini): 95 pass 96 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/holidays/__init__.py b/holidays/__init__.py --- a/holidays/__init__.py +++ b/holidays/__init__.py @@ -41,4 +41,4 @@ list_supported_financial, ) -__version__ = "0.17" +__version__ = "0.17.1" diff --git a/holidays/countries/eswatini.py b/holidays/countries/eswatini.py --- a/holidays/countries/eswatini.py +++ b/holidays/countries/eswatini.py @@ -80,11 +80,13 @@ class Swaziland(Eswatini): - warnings.warn( - "Swaziland is deprecated, use Eswatini instead.", - DeprecationWarning, - ) - pass + def __init__(self, *args, **kwargs) -> None: + warnings.warn( + "Swaziland is deprecated, use Eswatini instead.", + DeprecationWarning, + ) + + super().__init__(*args, **kwargs) class SZ(Eswatini):
{"golden_diff": "diff --git a/holidays/__init__.py b/holidays/__init__.py\n--- a/holidays/__init__.py\n+++ b/holidays/__init__.py\n@@ -41,4 +41,4 @@\n list_supported_financial,\n )\n \n-__version__ = \"0.17\"\n+__version__ = \"0.17.1\"\ndiff --git a/holidays/countries/eswatini.py b/holidays/countries/eswatini.py\n--- a/holidays/countries/eswatini.py\n+++ b/holidays/countries/eswatini.py\n@@ -80,11 +80,13 @@\n \n \n class Swaziland(Eswatini):\n- warnings.warn(\n- \"Swaziland is deprecated, use Eswatini instead.\",\n- DeprecationWarning,\n- )\n- pass\n+ def __init__(self, *args, **kwargs) -> None:\n+ warnings.warn(\n+ \"Swaziland is deprecated, use Eswatini instead.\",\n+ DeprecationWarning,\n+ )\n+\n+ super().__init__(*args, **kwargs)\n \n \n class SZ(Eswatini):\n", "issue": "Swaziland deprecation warning\nResolves #793.\n", "before_files": [{"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022\n# ryanss <ryanssdev@icloud.com> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\nfrom holidays.constants import (\n MON,\n TUE,\n WED,\n THU,\n FRI,\n SAT,\n SUN,\n WEEKEND,\n JAN,\n FEB,\n MAR,\n APR,\n MAY,\n JUN,\n JUL,\n AUG,\n SEP,\n OCT,\n NOV,\n DEC,\n)\nfrom holidays.countries import *\nfrom holidays.financial import *\nfrom holidays.holiday_base import * # * import required for IDE docstrings\nfrom holidays.utils import (\n CountryHoliday,\n country_holidays,\n financial_holidays,\n list_supported_countries,\n list_supported_financial,\n)\n\n__version__ = \"0.17\"\n", "path": "holidays/__init__.py"}, {"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022\n# ryanss <ryanssdev@icloud.com> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nimport warnings\nfrom datetime import date\n\nfrom dateutil.easter import easter\nfrom dateutil.relativedelta import relativedelta as rd\n\nfrom holidays.constants import SUN, JAN, APR, MAY, JUL, SEP, DEC\nfrom holidays.holiday_base import HolidayBase\n\n\nclass Eswatini(HolidayBase):\n \"\"\"\n https://swazilii.org/sz/legislation/act/1938/71\n https://www.officeholidays.com/countries/swaziland\n \"\"\"\n\n country = \"SZ\"\n\n def _populate(self, year):\n super()._populate(year)\n\n # Observed since 1938\n if year > 1938:\n self[date(year, JAN, 1)] = \"New Year's Day\"\n\n e = easter(year)\n good_friday = e - rd(days=2)\n easter_monday = e + rd(days=1)\n ascension_day = e + rd(days=39)\n self[good_friday] = \"Good Friday\"\n self[easter_monday] = \"Easter Monday\"\n self[ascension_day] = \"Ascension Day\"\n\n if year > 1968:\n self[date(year, APR, 25)] = \"National Flag Day\"\n\n if year > 1982:\n # https://www.officeholidays.com/holidays/swaziland/birthday-of-late-king-sobhuza\n self[date(year, JUL, 22)] = \"Birthday of Late King Sobhuza\"\n\n if year > 1986:\n # https://www.officeholidays.com/holidays/swaziland/birthday-of-king-mswati-iii\n self[date(year, APR, 19)] = \"King's Birthday\"\n\n self[date(year, MAY, 1)] = \"Worker's Day\"\n self[date(year, SEP, 6)] = \"Independence Day\"\n self[date(year, DEC, 25)] = \"Christmas Day\"\n self[date(year, DEC, 26)] = \"Boxing Day\"\n\n # Once-off public holidays\n y2k = \"Y2K changeover\"\n\n if year == 1999:\n # https://mg.co.za/article/1999-12-09-swaziland-declares-bank-holidays/\n self[date(1999, DEC, 31)] = y2k\n if year == 2000:\n self[date(2000, JAN, 3)] = y2k\n\n # As of 2021/1/1, whenever a public holiday falls on a\n # Sunday\n # it rolls over to the following Monday\n for k, v in list(self.items()):\n\n if self.observed and k.weekday() == SUN and k.year == year:\n add_days = 1\n while self.get(k + rd(days=add_days)) is not None:\n add_days += 1\n self[k + rd(days=add_days)] = v + \" (Day Off)\"\n\n\nclass Swaziland(Eswatini):\n warnings.warn(\n \"Swaziland is deprecated, use Eswatini instead.\",\n DeprecationWarning,\n )\n pass\n\n\nclass SZ(Eswatini):\n pass\n\n\nclass SZW(Eswatini):\n pass\n", "path": "holidays/countries/eswatini.py"}], "after_files": [{"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022\n# ryanss <ryanssdev@icloud.com> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\nfrom holidays.constants import (\n MON,\n TUE,\n WED,\n THU,\n FRI,\n SAT,\n SUN,\n WEEKEND,\n JAN,\n FEB,\n MAR,\n APR,\n MAY,\n JUN,\n JUL,\n AUG,\n SEP,\n OCT,\n NOV,\n DEC,\n)\nfrom holidays.countries import *\nfrom holidays.financial import *\nfrom holidays.holiday_base import * # * import required for IDE docstrings\nfrom holidays.utils import (\n CountryHoliday,\n country_holidays,\n financial_holidays,\n list_supported_countries,\n list_supported_financial,\n)\n\n__version__ = \"0.17.1\"\n", "path": "holidays/__init__.py"}, {"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022\n# ryanss <ryanssdev@icloud.com> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nimport warnings\nfrom datetime import date\n\nfrom dateutil.easter import easter\nfrom dateutil.relativedelta import relativedelta as rd\n\nfrom holidays.constants import SUN, JAN, APR, MAY, JUL, SEP, DEC\nfrom holidays.holiday_base import HolidayBase\n\n\nclass Eswatini(HolidayBase):\n \"\"\"\n https://swazilii.org/sz/legislation/act/1938/71\n https://www.officeholidays.com/countries/swaziland\n \"\"\"\n\n country = \"SZ\"\n\n def _populate(self, year):\n super()._populate(year)\n\n # Observed since 1938\n if year > 1938:\n self[date(year, JAN, 1)] = \"New Year's Day\"\n\n e = easter(year)\n good_friday = e - rd(days=2)\n easter_monday = e + rd(days=1)\n ascension_day = e + rd(days=39)\n self[good_friday] = \"Good Friday\"\n self[easter_monday] = \"Easter Monday\"\n self[ascension_day] = \"Ascension Day\"\n\n if year > 1968:\n self[date(year, APR, 25)] = \"National Flag Day\"\n\n if year > 1982:\n # https://www.officeholidays.com/holidays/swaziland/birthday-of-late-king-sobhuza\n self[date(year, JUL, 22)] = \"Birthday of Late King Sobhuza\"\n\n if year > 1986:\n # https://www.officeholidays.com/holidays/swaziland/birthday-of-king-mswati-iii\n self[date(year, APR, 19)] = \"King's Birthday\"\n\n self[date(year, MAY, 1)] = \"Worker's Day\"\n self[date(year, SEP, 6)] = \"Independence Day\"\n self[date(year, DEC, 25)] = \"Christmas Day\"\n self[date(year, DEC, 26)] = \"Boxing Day\"\n\n # Once-off public holidays\n y2k = \"Y2K changeover\"\n\n if year == 1999:\n # https://mg.co.za/article/1999-12-09-swaziland-declares-bank-holidays/\n self[date(1999, DEC, 31)] = y2k\n if year == 2000:\n self[date(2000, JAN, 3)] = y2k\n\n # As of 2021/1/1, whenever a public holiday falls on a\n # Sunday\n # it rolls over to the following Monday\n for k, v in list(self.items()):\n\n if self.observed and k.weekday() == SUN and k.year == year:\n add_days = 1\n while self.get(k + rd(days=add_days)) is not None:\n add_days += 1\n self[k + rd(days=add_days)] = v + \" (Day Off)\"\n\n\nclass Swaziland(Eswatini):\n def __init__(self, *args, **kwargs) -> None:\n warnings.warn(\n \"Swaziland is deprecated, use Eswatini instead.\",\n DeprecationWarning,\n )\n\n super().__init__(*args, **kwargs)\n\n\nclass 
SZ(Eswatini):\n pass\n\n\nclass SZW(Eswatini):\n pass\n", "path": "holidays/countries/eswatini.py"}]}
1,746
251
gh_patches_debug_27065
rasdani/github-patches
git_diff
mdn__kuma-7869
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Refactorings for welcome HTML email Based on https://github.com/mdn/kuma/pull/7866 we'll drop the `utm` query string things and we'll get rid of the plain text email template. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kuma/users/tasks.py` Content: ``` 1 import logging 2 3 from celery import task 4 from django.conf import settings 5 from django.contrib.auth import get_user_model 6 from django.utils import translation 7 from django.utils.translation import gettext_lazy as _ 8 9 from kuma.core.decorators import skip_in_maintenance_mode 10 from kuma.core.email_utils import render_email 11 from kuma.core.utils import ( 12 EmailMultiAlternativesRetrying, 13 send_mail_retrying, 14 strings_are_translated, 15 ) 16 17 log = logging.getLogger("kuma.users.tasks") 18 19 20 WELCOME_EMAIL_STRINGS = [ 21 "Like words?", 22 "Don't be shy, if you have any doubt, problems, questions: contact us! We are here to help.", 23 ] 24 25 26 @task 27 @skip_in_maintenance_mode 28 def send_recovery_email(user_pk, email, locale=None): 29 user = get_user_model().objects.get(pk=user_pk) 30 locale = locale or settings.WIKI_DEFAULT_LANGUAGE 31 url = settings.SITE_URL + user.get_recovery_url() 32 context = {"recovery_url": url, "username": user.username} 33 with translation.override(locale): 34 subject = render_email("users/email/recovery/subject.ltxt", context) 35 # Email subject *must not* contain newlines 36 subject = "".join(subject.splitlines()) 37 plain = render_email("users/email/recovery/plain.ltxt", context) 38 send_mail_retrying(subject, plain, settings.DEFAULT_FROM_EMAIL, [email]) 39 40 41 @task 42 @skip_in_maintenance_mode 43 def send_welcome_email(user_pk, locale): 44 user = get_user_model().objects.get(pk=user_pk) 45 if locale == settings.WIKI_DEFAULT_LANGUAGE or strings_are_translated( 46 WELCOME_EMAIL_STRINGS, locale 47 ): 48 context = {"username": user.username} 49 log.debug("Using the locale %s to send the welcome email", locale) 50 with translation.override(locale): 51 content_plain = render_email("users/email/welcome/plain.ltxt", context) 52 content_html = render_email("users/email/welcome/html.ltxt", context) 53 54 email = EmailMultiAlternativesRetrying( 55 _("Getting started with your new MDN account"), 56 content_plain, 57 settings.WELCOME_EMAIL_FROM, 58 [user.email], 59 ) 60 email.attach_alternative(content_html, "text/html") 61 email.send() 62 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kuma/users/tasks.py b/kuma/users/tasks.py --- a/kuma/users/tasks.py +++ b/kuma/users/tasks.py @@ -5,6 +5,7 @@ from django.contrib.auth import get_user_model from django.utils import translation from django.utils.translation import gettext_lazy as _ +from pyquery import PyQuery as pq from kuma.core.decorators import skip_in_maintenance_mode from kuma.core.email_utils import render_email @@ -48,8 +49,18 @@ context = {"username": user.username} log.debug("Using the locale %s to send the welcome email", locale) with translation.override(locale): - content_plain = render_email("users/email/welcome/plain.ltxt", context) content_html = render_email("users/email/welcome/html.ltxt", context) + doc = pq(content_html) + urls = [] + for i, link in enumerate(doc("body a[href]").items()): + link.text(f"{link.text()}[{i + 1}]") + urls.append((i + 1, link.attr("href"))) + + content_plain = doc("body").text().replace("\n", "\n\n") + if urls: + content_plain += "\n\n" + for i, url in urls: + content_plain += f"[{i}] {url}\n" email = EmailMultiAlternativesRetrying( _("Getting started with your new MDN account"),
{"golden_diff": "diff --git a/kuma/users/tasks.py b/kuma/users/tasks.py\n--- a/kuma/users/tasks.py\n+++ b/kuma/users/tasks.py\n@@ -5,6 +5,7 @@\n from django.contrib.auth import get_user_model\n from django.utils import translation\n from django.utils.translation import gettext_lazy as _\n+from pyquery import PyQuery as pq\n \n from kuma.core.decorators import skip_in_maintenance_mode\n from kuma.core.email_utils import render_email\n@@ -48,8 +49,18 @@\n context = {\"username\": user.username}\n log.debug(\"Using the locale %s to send the welcome email\", locale)\n with translation.override(locale):\n- content_plain = render_email(\"users/email/welcome/plain.ltxt\", context)\n content_html = render_email(\"users/email/welcome/html.ltxt\", context)\n+ doc = pq(content_html)\n+ urls = []\n+ for i, link in enumerate(doc(\"body a[href]\").items()):\n+ link.text(f\"{link.text()}[{i + 1}]\")\n+ urls.append((i + 1, link.attr(\"href\")))\n+\n+ content_plain = doc(\"body\").text().replace(\"\\n\", \"\\n\\n\")\n+ if urls:\n+ content_plain += \"\\n\\n\"\n+ for i, url in urls:\n+ content_plain += f\"[{i}] {url}\\n\"\n \n email = EmailMultiAlternativesRetrying(\n _(\"Getting started with your new MDN account\"),\n", "issue": "Refactorings for welcome HTML email \nBased on https://github.com/mdn/kuma/pull/7866 \r\nwe'll drop the `utm` query string things and we'll get rid of the plain text email template. \n", "before_files": [{"content": "import logging\n\nfrom celery import task\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.utils import translation\nfrom django.utils.translation import gettext_lazy as _\n\nfrom kuma.core.decorators import skip_in_maintenance_mode\nfrom kuma.core.email_utils import render_email\nfrom kuma.core.utils import (\n EmailMultiAlternativesRetrying,\n send_mail_retrying,\n strings_are_translated,\n)\n\nlog = logging.getLogger(\"kuma.users.tasks\")\n\n\nWELCOME_EMAIL_STRINGS = [\n \"Like words?\",\n \"Don't be shy, if you have any doubt, problems, questions: contact us! 
We are here to help.\",\n]\n\n\n@task\n@skip_in_maintenance_mode\ndef send_recovery_email(user_pk, email, locale=None):\n user = get_user_model().objects.get(pk=user_pk)\n locale = locale or settings.WIKI_DEFAULT_LANGUAGE\n url = settings.SITE_URL + user.get_recovery_url()\n context = {\"recovery_url\": url, \"username\": user.username}\n with translation.override(locale):\n subject = render_email(\"users/email/recovery/subject.ltxt\", context)\n # Email subject *must not* contain newlines\n subject = \"\".join(subject.splitlines())\n plain = render_email(\"users/email/recovery/plain.ltxt\", context)\n send_mail_retrying(subject, plain, settings.DEFAULT_FROM_EMAIL, [email])\n\n\n@task\n@skip_in_maintenance_mode\ndef send_welcome_email(user_pk, locale):\n user = get_user_model().objects.get(pk=user_pk)\n if locale == settings.WIKI_DEFAULT_LANGUAGE or strings_are_translated(\n WELCOME_EMAIL_STRINGS, locale\n ):\n context = {\"username\": user.username}\n log.debug(\"Using the locale %s to send the welcome email\", locale)\n with translation.override(locale):\n content_plain = render_email(\"users/email/welcome/plain.ltxt\", context)\n content_html = render_email(\"users/email/welcome/html.ltxt\", context)\n\n email = EmailMultiAlternativesRetrying(\n _(\"Getting started with your new MDN account\"),\n content_plain,\n settings.WELCOME_EMAIL_FROM,\n [user.email],\n )\n email.attach_alternative(content_html, \"text/html\")\n email.send()\n", "path": "kuma/users/tasks.py"}], "after_files": [{"content": "import logging\n\nfrom celery import task\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.utils import translation\nfrom django.utils.translation import gettext_lazy as _\nfrom pyquery import PyQuery as pq\n\nfrom kuma.core.decorators import skip_in_maintenance_mode\nfrom kuma.core.email_utils import render_email\nfrom kuma.core.utils import (\n EmailMultiAlternativesRetrying,\n send_mail_retrying,\n strings_are_translated,\n)\n\nlog = logging.getLogger(\"kuma.users.tasks\")\n\n\nWELCOME_EMAIL_STRINGS = [\n \"Like words?\",\n \"Don't be shy, if you have any doubt, problems, questions: contact us! 
We are here to help.\",\n]\n\n\n@task\n@skip_in_maintenance_mode\ndef send_recovery_email(user_pk, email, locale=None):\n user = get_user_model().objects.get(pk=user_pk)\n locale = locale or settings.WIKI_DEFAULT_LANGUAGE\n url = settings.SITE_URL + user.get_recovery_url()\n context = {\"recovery_url\": url, \"username\": user.username}\n with translation.override(locale):\n subject = render_email(\"users/email/recovery/subject.ltxt\", context)\n # Email subject *must not* contain newlines\n subject = \"\".join(subject.splitlines())\n plain = render_email(\"users/email/recovery/plain.ltxt\", context)\n send_mail_retrying(subject, plain, settings.DEFAULT_FROM_EMAIL, [email])\n\n\n@task\n@skip_in_maintenance_mode\ndef send_welcome_email(user_pk, locale):\n user = get_user_model().objects.get(pk=user_pk)\n if locale == settings.WIKI_DEFAULT_LANGUAGE or strings_are_translated(\n WELCOME_EMAIL_STRINGS, locale\n ):\n context = {\"username\": user.username}\n log.debug(\"Using the locale %s to send the welcome email\", locale)\n with translation.override(locale):\n content_html = render_email(\"users/email/welcome/html.ltxt\", context)\n doc = pq(content_html)\n urls = []\n for i, link in enumerate(doc(\"body a[href]\").items()):\n link.text(f\"{link.text()}[{i + 1}]\")\n urls.append((i + 1, link.attr(\"href\")))\n\n content_plain = doc(\"body\").text().replace(\"\\n\", \"\\n\\n\")\n if urls:\n content_plain += \"\\n\\n\"\n for i, url in urls:\n content_plain += f\"[{i}] {url}\\n\"\n\n email = EmailMultiAlternativesRetrying(\n _(\"Getting started with your new MDN account\"),\n content_plain,\n settings.WELCOME_EMAIL_FROM,\n [user.email],\n )\n email.attach_alternative(content_html, \"text/html\")\n email.send()\n", "path": "kuma/users/tasks.py"}]}
902
316
gh_patches_debug_38488
rasdani/github-patches
git_diff
larq__larq-356
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make the HyperparameterScheduler compatible with the CaseOptimizer ### Feature motivation The HyperparameterScheduler is not compatible with the CaseOptimizer since the hyperparameters are attributes of the optimizers inside the CaseOptimizer. ### Feature description I propose one of the two possible solutions: Either we could give HyperparameterScheduler the optimizer as an argument. It could be called via ``` HyperparameterScheduler(schedule, hyperparameter, optimizer, verbose=0) ``` and the right optimizer inside the CaseOptimizer can be addressed. (@koenhelwegen) My second proposal would be to search the CaseOptimizer for optimizers that have the hyperparameter as attribute. Then the schedule can be applied to this optimizer only. The downside of this would be that in case there are two optimizers inside the CaseOptimizer that have a hyperparameter with the same name the schedule would be applied to both of them. I do not think this would happen very often but it could definitively be an issue. See code below for my second proposal. ### Feature implementation ``` python class HyperparameterScheduler(tf.keras.callbacks.Callback): """Generic hyperparameter scheduler. # Arguments schedule: a function that takes an epoch index as input (integer, indexed from 0) and returns a new hyperparameter as output. hyperparameter: str. the name of the hyperparameter to be scheduled. verbose: int. 0: quiet, 1: update messages. """ def __init__(self, schedule, hyperparameter, verbose=0): super(HyperparameterScheduler, self).__init__() self.schedule = schedule self.hyperparameter = hyperparameter self.verbose = verbose def on_epoch_begin(self, epoch, logs=None): for op in self.model.optimizer.optimizers: if hasattr(op, self.hyperparameter): hp = getattr(op, self.hyperparameter) try: # new API hyperparameter_val = tf.keras.backend.get_value(hp) hyperparameter_val = self.schedule(epoch, hyperparameter_val) except TypeError: # Support for old API for backward compatibility hyperparameter_val = self.schedule(epoch) tf.keras.backend.set_value(hp, hyperparameter_val) if self.verbose > 0: print( f"Epoch {epoch + 1}: {self.hyperparameter} changning to {tf.keras.backend.get_value(hp)}." ) def on_epoch_end(self, epoch, logs=None): logs = logs or {} for op in self.model.optimizer.optimizers: if hasattr(op, self.hyperparameter): hp = getattr(op, self.hyperparameter) logs[self.hyperparameter] = tf.keras.backend.get_value(hp) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `larq/callbacks.py` Content: ``` 1 import tensorflow as tf 2 3 4 class HyperparameterScheduler(tf.keras.callbacks.Callback): 5 """Generic hyperparameter scheduler. 6 7 # Arguments 8 schedule: a function that takes an epoch index as input 9 (integer, indexed from 0) and returns a new hyperparameter as output. 10 hyperparameter: str. the name of the hyperparameter to be scheduled. 11 verbose: int. 0: quiet, 1: update messages. 12 """ 13 14 def __init__(self, schedule, hyperparameter, verbose=0): 15 super(HyperparameterScheduler, self).__init__() 16 self.schedule = schedule 17 self.hyperparameter = hyperparameter 18 self.verbose = verbose 19 20 def on_epoch_begin(self, epoch, logs=None): 21 if not hasattr(self.model.optimizer, self.hyperparameter): 22 raise ValueError( 23 f'Optimizer must have a "{self.hyperparameter}" attribute.' 
24 ) 25 26 hp = getattr(self.model.optimizer, self.hyperparameter) 27 try: # new API 28 hyperparameter_val = tf.keras.backend.get_value(hp) 29 hyperparameter_val = self.schedule(epoch, hyperparameter_val) 30 except TypeError: # Support for old API for backward compatibility 31 hyperparameter_val = self.schedule(epoch) 32 33 tf.keras.backend.set_value(hp, hyperparameter_val) 34 35 if self.verbose > 0: 36 print( 37 f"Epoch {epoch + 1}: {self.hyperparameter} changning to {tf.keras.backend.get_value(hp)}." 38 ) 39 40 def on_epoch_end(self, epoch, logs=None): 41 logs = logs or {} 42 hp = getattr(self.model.optimizer, self.hyperparameter) 43 logs[self.hyperparameter] = tf.keras.backend.get_value(hp) 44 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/larq/callbacks.py b/larq/callbacks.py --- a/larq/callbacks.py +++ b/larq/callbacks.py @@ -4,26 +4,40 @@ class HyperparameterScheduler(tf.keras.callbacks.Callback): """Generic hyperparameter scheduler. + !!! example + ```python + bop = lq.optimizers.Bop(threshold=1e-6, gamma=1e-3) + adam = tf.keras.optimizers.Adam(0.01) + optimizer = lq.optimizers.CaseOptimizer( + (lq.optimizers.Bop.is_binary_variable, bop), default_optimizer=adam, + ) + callbacks = [ + HyperparameterScheduler(lambda x: 0.001 * (0.1 ** (x // 30)), "gamma", bop) + ] + ``` # Arguments + optimizer: the optimizer that contains the hyperparameter that will be scheduled. + Defaults to `self.model.optimizer` if `optimizer == None`. schedule: a function that takes an epoch index as input (integer, indexed from 0) and returns a new hyperparameter as output. hyperparameter: str. the name of the hyperparameter to be scheduled. verbose: int. 0: quiet, 1: update messages. """ - def __init__(self, schedule, hyperparameter, verbose=0): + def __init__(self, schedule, hyperparameter, optimizer=None, verbose=0): super(HyperparameterScheduler, self).__init__() + self.optimizer = optimizer if optimizer else self.model.optimizer self.schedule = schedule self.hyperparameter = hyperparameter self.verbose = verbose def on_epoch_begin(self, epoch, logs=None): - if not hasattr(self.model.optimizer, self.hyperparameter): + if not hasattr(self.optimizer, self.hyperparameter): raise ValueError( f'Optimizer must have a "{self.hyperparameter}" attribute.' ) - hp = getattr(self.model.optimizer, self.hyperparameter) + hp = getattr(self.optimizer, self.hyperparameter) try: # new API hyperparameter_val = tf.keras.backend.get_value(hp) hyperparameter_val = self.schedule(epoch, hyperparameter_val) @@ -34,10 +48,10 @@ if self.verbose > 0: print( - f"Epoch {epoch + 1}: {self.hyperparameter} changning to {tf.keras.backend.get_value(hp)}." + f"Epoch {epoch + 1}: {self.hyperparameter} changing to {tf.keras.backend.get_value(hp)}." ) def on_epoch_end(self, epoch, logs=None): logs = logs or {} - hp = getattr(self.model.optimizer, self.hyperparameter) + hp = getattr(self.optimizer, self.hyperparameter) logs[self.hyperparameter] = tf.keras.backend.get_value(hp)
{"golden_diff": "diff --git a/larq/callbacks.py b/larq/callbacks.py\n--- a/larq/callbacks.py\n+++ b/larq/callbacks.py\n@@ -4,26 +4,40 @@\n class HyperparameterScheduler(tf.keras.callbacks.Callback):\n \"\"\"Generic hyperparameter scheduler.\n \n+ !!! example\n+ ```python\n+ bop = lq.optimizers.Bop(threshold=1e-6, gamma=1e-3)\n+ adam = tf.keras.optimizers.Adam(0.01)\n+ optimizer = lq.optimizers.CaseOptimizer(\n+ (lq.optimizers.Bop.is_binary_variable, bop), default_optimizer=adam,\n+ )\n+ callbacks = [\n+ HyperparameterScheduler(lambda x: 0.001 * (0.1 ** (x // 30)), \"gamma\", bop)\n+ ]\n+ ```\n # Arguments\n+ optimizer: the optimizer that contains the hyperparameter that will be scheduled.\n+ Defaults to `self.model.optimizer` if `optimizer == None`.\n schedule: a function that takes an epoch index as input\n (integer, indexed from 0) and returns a new hyperparameter as output.\n hyperparameter: str. the name of the hyperparameter to be scheduled.\n verbose: int. 0: quiet, 1: update messages.\n \"\"\"\n \n- def __init__(self, schedule, hyperparameter, verbose=0):\n+ def __init__(self, schedule, hyperparameter, optimizer=None, verbose=0):\n super(HyperparameterScheduler, self).__init__()\n+ self.optimizer = optimizer if optimizer else self.model.optimizer\n self.schedule = schedule\n self.hyperparameter = hyperparameter\n self.verbose = verbose\n \n def on_epoch_begin(self, epoch, logs=None):\n- if not hasattr(self.model.optimizer, self.hyperparameter):\n+ if not hasattr(self.optimizer, self.hyperparameter):\n raise ValueError(\n f'Optimizer must have a \"{self.hyperparameter}\" attribute.'\n )\n \n- hp = getattr(self.model.optimizer, self.hyperparameter)\n+ hp = getattr(self.optimizer, self.hyperparameter)\n try: # new API\n hyperparameter_val = tf.keras.backend.get_value(hp)\n hyperparameter_val = self.schedule(epoch, hyperparameter_val)\n@@ -34,10 +48,10 @@\n \n if self.verbose > 0:\n print(\n- f\"Epoch {epoch + 1}: {self.hyperparameter} changning to {tf.keras.backend.get_value(hp)}.\"\n+ f\"Epoch {epoch + 1}: {self.hyperparameter} changing to {tf.keras.backend.get_value(hp)}.\"\n )\n \n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n- hp = getattr(self.model.optimizer, self.hyperparameter)\n+ hp = getattr(self.optimizer, self.hyperparameter)\n logs[self.hyperparameter] = tf.keras.backend.get_value(hp)\n", "issue": "Make the HyperparameterScheduler compatible with the CaseOptimizer\n### Feature motivation\r\nThe HyperparameterScheduler is not compatible with the CaseOptimizer since the hyperparameters are attributes of the optimizers inside the CaseOptimizer. \r\n\r\n### Feature description\r\nI propose one of the two possible solutions: \r\nEither we could give HyperparameterScheduler the optimizer as an argument. It could be called via ``` HyperparameterScheduler(schedule, hyperparameter, optimizer, verbose=0) ``` and the right optimizer inside the CaseOptimizer can be addressed. (@koenhelwegen) \r\n\r\nMy second proposal would be to search the CaseOptimizer for optimizers that have the hyperparameter as attribute. Then the schedule can be applied to this optimizer only. The downside of this would be that in case there are two optimizers inside the CaseOptimizer that have a hyperparameter with the same name the schedule would be applied to both of them. I do not think this would happen very often but it could definitively be an issue. See code below for my second proposal. 
\r\n### Feature implementation\r\n``` python \r\nclass HyperparameterScheduler(tf.keras.callbacks.Callback):\r\n \"\"\"Generic hyperparameter scheduler.\r\n # Arguments\r\n schedule: a function that takes an epoch index as input\r\n (integer, indexed from 0) and returns a new hyperparameter as output.\r\n hyperparameter: str. the name of the hyperparameter to be scheduled.\r\n verbose: int. 0: quiet, 1: update messages.\r\n \"\"\"\r\n\r\n def __init__(self, schedule, hyperparameter, verbose=0):\r\n super(HyperparameterScheduler, self).__init__()\r\n self.schedule = schedule\r\n self.hyperparameter = hyperparameter\r\n self.verbose = verbose\r\n\r\n def on_epoch_begin(self, epoch, logs=None):\r\n for op in self.model.optimizer.optimizers:\r\n if hasattr(op, self.hyperparameter):\r\n\r\n hp = getattr(op, self.hyperparameter)\r\n try: # new API\r\n hyperparameter_val = tf.keras.backend.get_value(hp)\r\n hyperparameter_val = self.schedule(epoch, hyperparameter_val)\r\n except TypeError: # Support for old API for backward compatibility\r\n hyperparameter_val = self.schedule(epoch)\r\n\r\n tf.keras.backend.set_value(hp, hyperparameter_val)\r\n\r\n if self.verbose > 0:\r\n print(\r\n f\"Epoch {epoch + 1}: {self.hyperparameter} changning to {tf.keras.backend.get_value(hp)}.\"\r\n )\r\n\r\n def on_epoch_end(self, epoch, logs=None):\r\n logs = logs or {}\r\n for op in self.model.optimizer.optimizers:\r\n if hasattr(op, self.hyperparameter):\r\n hp = getattr(op, self.hyperparameter)\r\n logs[self.hyperparameter] = tf.keras.backend.get_value(hp)\r\n```\n", "before_files": [{"content": "import tensorflow as tf\n\n\nclass HyperparameterScheduler(tf.keras.callbacks.Callback):\n \"\"\"Generic hyperparameter scheduler.\n\n # Arguments\n schedule: a function that takes an epoch index as input\n (integer, indexed from 0) and returns a new hyperparameter as output.\n hyperparameter: str. the name of the hyperparameter to be scheduled.\n verbose: int. 0: quiet, 1: update messages.\n \"\"\"\n\n def __init__(self, schedule, hyperparameter, verbose=0):\n super(HyperparameterScheduler, self).__init__()\n self.schedule = schedule\n self.hyperparameter = hyperparameter\n self.verbose = verbose\n\n def on_epoch_begin(self, epoch, logs=None):\n if not hasattr(self.model.optimizer, self.hyperparameter):\n raise ValueError(\n f'Optimizer must have a \"{self.hyperparameter}\" attribute.'\n )\n\n hp = getattr(self.model.optimizer, self.hyperparameter)\n try: # new API\n hyperparameter_val = tf.keras.backend.get_value(hp)\n hyperparameter_val = self.schedule(epoch, hyperparameter_val)\n except TypeError: # Support for old API for backward compatibility\n hyperparameter_val = self.schedule(epoch)\n\n tf.keras.backend.set_value(hp, hyperparameter_val)\n\n if self.verbose > 0:\n print(\n f\"Epoch {epoch + 1}: {self.hyperparameter} changning to {tf.keras.backend.get_value(hp)}.\"\n )\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n hp = getattr(self.model.optimizer, self.hyperparameter)\n logs[self.hyperparameter] = tf.keras.backend.get_value(hp)\n", "path": "larq/callbacks.py"}], "after_files": [{"content": "import tensorflow as tf\n\n\nclass HyperparameterScheduler(tf.keras.callbacks.Callback):\n \"\"\"Generic hyperparameter scheduler.\n\n !!! 
example\n ```python\n bop = lq.optimizers.Bop(threshold=1e-6, gamma=1e-3)\n adam = tf.keras.optimizers.Adam(0.01)\n optimizer = lq.optimizers.CaseOptimizer(\n (lq.optimizers.Bop.is_binary_variable, bop), default_optimizer=adam,\n )\n callbacks = [\n HyperparameterScheduler(lambda x: 0.001 * (0.1 ** (x // 30)), \"gamma\", bop)\n ]\n ```\n # Arguments\n optimizer: the optimizer that contains the hyperparameter that will be scheduled.\n Defaults to `self.model.optimizer` if `optimizer == None`.\n schedule: a function that takes an epoch index as input\n (integer, indexed from 0) and returns a new hyperparameter as output.\n hyperparameter: str. the name of the hyperparameter to be scheduled.\n verbose: int. 0: quiet, 1: update messages.\n \"\"\"\n\n def __init__(self, schedule, hyperparameter, optimizer=None, verbose=0):\n super(HyperparameterScheduler, self).__init__()\n self.optimizer = optimizer if optimizer else self.model.optimizer\n self.schedule = schedule\n self.hyperparameter = hyperparameter\n self.verbose = verbose\n\n def on_epoch_begin(self, epoch, logs=None):\n if not hasattr(self.optimizer, self.hyperparameter):\n raise ValueError(\n f'Optimizer must have a \"{self.hyperparameter}\" attribute.'\n )\n\n hp = getattr(self.optimizer, self.hyperparameter)\n try: # new API\n hyperparameter_val = tf.keras.backend.get_value(hp)\n hyperparameter_val = self.schedule(epoch, hyperparameter_val)\n except TypeError: # Support for old API for backward compatibility\n hyperparameter_val = self.schedule(epoch)\n\n tf.keras.backend.set_value(hp, hyperparameter_val)\n\n if self.verbose > 0:\n print(\n f\"Epoch {epoch + 1}: {self.hyperparameter} changing to {tf.keras.backend.get_value(hp)}.\"\n )\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n hp = getattr(self.optimizer, self.hyperparameter)\n logs[self.hyperparameter] = tf.keras.backend.get_value(hp)\n", "path": "larq/callbacks.py"}]}
1,273
650
gh_patches_debug_118
rasdani/github-patches
git_diff
librosa__librosa-1738
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Release new version to fix scipy tests https://github.com/librosa/librosa/commit/12dee8eabed7df14c5622b52c05393ddfeb11f4b fixed compatibility with scipy in tests but it's not included in any release. We rely as downstream packagers on tests to ensure all python dependencies play well together. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `librosa/version.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 """Version info""" 4 5 import sys 6 import importlib 7 8 short_version = "0.10" 9 version = "0.10.1dev" 10 11 12 def __get_mod_version(modname): 13 try: 14 if modname in sys.modules: 15 mod = sys.modules[modname] 16 else: 17 mod = importlib.import_module(modname) 18 try: 19 return mod.__version__ 20 except AttributeError: 21 return "installed, no version number available" 22 23 except ImportError: 24 return None 25 26 27 def show_versions() -> None: 28 """Return the version information for all librosa dependencies.""" 29 core_deps = [ 30 "audioread", 31 "numpy", 32 "scipy", 33 "sklearn", 34 "joblib", 35 "decorator", 36 "numba", 37 "soundfile", 38 "pooch", 39 "soxr", 40 "typing_extensions", 41 "lazy_loader", 42 "msgpack", 43 ] 44 45 extra_deps = [ 46 "numpydoc", 47 "sphinx", 48 "sphinx_rtd_theme", 49 "matplotlib", 50 "sphinx_multiversion", 51 "sphinx_gallery", 52 "mir_eval", 53 "ipython", 54 "sphinxcontrib.rsvgconverter", 55 "pytest", 56 "pytest_mpl", 57 "pytest_cov", 58 "samplerate", 59 "resampy", 60 "presets", 61 "packaging", 62 ] 63 64 print("INSTALLED VERSIONS") 65 print("------------------") 66 print(f"python: {sys.version}\n") 67 print(f"librosa: {version}\n") 68 for dep in core_deps: 69 print("{}: {}".format(dep, __get_mod_version(dep))) 70 print("") 71 for dep in extra_deps: 72 print("{}: {}".format(dep, __get_mod_version(dep))) 73 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/librosa/version.py b/librosa/version.py --- a/librosa/version.py +++ b/librosa/version.py @@ -6,7 +6,7 @@ import importlib short_version = "0.10" -version = "0.10.1dev" +version = "0.10.1" def __get_mod_version(modname):
{"golden_diff": "diff --git a/librosa/version.py b/librosa/version.py\n--- a/librosa/version.py\n+++ b/librosa/version.py\n@@ -6,7 +6,7 @@\n import importlib\n \n short_version = \"0.10\"\n-version = \"0.10.1dev\"\n+version = \"0.10.1\"\n \n \n def __get_mod_version(modname):\n", "issue": "Release new version to fix scipy tests\nhttps://github.com/librosa/librosa/commit/12dee8eabed7df14c5622b52c05393ddfeb11f4b fixed compatibility with scipy in tests but it's not included in any release.\r\nWe rely as downstream packagers on tests to ensure all python dependencies play well together.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Version info\"\"\"\n\nimport sys\nimport importlib\n\nshort_version = \"0.10\"\nversion = \"0.10.1dev\"\n\n\ndef __get_mod_version(modname):\n try:\n if modname in sys.modules:\n mod = sys.modules[modname]\n else:\n mod = importlib.import_module(modname)\n try:\n return mod.__version__\n except AttributeError:\n return \"installed, no version number available\"\n\n except ImportError:\n return None\n\n\ndef show_versions() -> None:\n \"\"\"Return the version information for all librosa dependencies.\"\"\"\n core_deps = [\n \"audioread\",\n \"numpy\",\n \"scipy\",\n \"sklearn\",\n \"joblib\",\n \"decorator\",\n \"numba\",\n \"soundfile\",\n \"pooch\",\n \"soxr\",\n \"typing_extensions\",\n \"lazy_loader\",\n \"msgpack\",\n ]\n\n extra_deps = [\n \"numpydoc\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"matplotlib\",\n \"sphinx_multiversion\",\n \"sphinx_gallery\",\n \"mir_eval\",\n \"ipython\",\n \"sphinxcontrib.rsvgconverter\",\n \"pytest\",\n \"pytest_mpl\",\n \"pytest_cov\",\n \"samplerate\",\n \"resampy\",\n \"presets\",\n \"packaging\",\n ]\n\n print(\"INSTALLED VERSIONS\")\n print(\"------------------\")\n print(f\"python: {sys.version}\\n\")\n print(f\"librosa: {version}\\n\")\n for dep in core_deps:\n print(\"{}: {}\".format(dep, __get_mod_version(dep)))\n print(\"\")\n for dep in extra_deps:\n print(\"{}: {}\".format(dep, __get_mod_version(dep)))\n", "path": "librosa/version.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Version info\"\"\"\n\nimport sys\nimport importlib\n\nshort_version = \"0.10\"\nversion = \"0.10.1\"\n\n\ndef __get_mod_version(modname):\n try:\n if modname in sys.modules:\n mod = sys.modules[modname]\n else:\n mod = importlib.import_module(modname)\n try:\n return mod.__version__\n except AttributeError:\n return \"installed, no version number available\"\n\n except ImportError:\n return None\n\n\ndef show_versions() -> None:\n \"\"\"Return the version information for all librosa dependencies.\"\"\"\n core_deps = [\n \"audioread\",\n \"numpy\",\n \"scipy\",\n \"sklearn\",\n \"joblib\",\n \"decorator\",\n \"numba\",\n \"soundfile\",\n \"pooch\",\n \"soxr\",\n \"typing_extensions\",\n \"lazy_loader\",\n \"msgpack\",\n ]\n\n extra_deps = [\n \"numpydoc\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"matplotlib\",\n \"sphinx_multiversion\",\n \"sphinx_gallery\",\n \"mir_eval\",\n \"ipython\",\n \"sphinxcontrib.rsvgconverter\",\n \"pytest\",\n \"pytest_mpl\",\n \"pytest_cov\",\n \"samplerate\",\n \"resampy\",\n \"presets\",\n \"packaging\",\n ]\n\n print(\"INSTALLED VERSIONS\")\n print(\"------------------\")\n print(f\"python: {sys.version}\\n\")\n print(f\"librosa: {version}\\n\")\n for dep in core_deps:\n print(\"{}: {}\".format(dep, __get_mod_version(dep)))\n print(\"\")\n for dep in extra_deps:\n print(\"{}: {}\".format(dep, __get_mod_version(dep)))\n", 
"path": "librosa/version.py"}]}
882
86
gh_patches_debug_11399
rasdani/github-patches
git_diff
ethereum__web3.py-407
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove shh from default list of modules The whisper protocol is not standardized enough to be in the default list. Also, note in the docs the current fractured nature of whisper. See #384 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `web3/main.py` Content: ``` 1 from __future__ import absolute_import 2 3 from eth_utils import ( 4 apply_to_return_value, 5 add_0x_prefix, 6 from_wei, 7 is_address, 8 is_checksum_address, 9 keccak, 10 remove_0x_prefix, 11 to_checksum_address, 12 to_wei, 13 ) 14 15 from web3.admin import Admin 16 from web3.eth import Eth 17 from web3.iban import Iban 18 from web3.miner import Miner 19 from web3.net import Net 20 from web3.personal import Personal 21 from web3.shh import Shh 22 from web3.testing import Testing 23 from web3.txpool import TxPool 24 from web3.version import Version 25 26 from web3.providers.ipc import ( 27 IPCProvider, 28 ) 29 from web3.providers.rpc import ( 30 HTTPProvider, 31 ) 32 from web3.providers.tester import ( 33 TestRPCProvider, 34 EthereumTesterProvider, 35 ) 36 37 from web3.manager import ( 38 RequestManager, 39 ) 40 41 from web3.utils.datastructures import ( 42 HexBytes, 43 ) 44 from web3.utils.encoding import ( 45 hex_encode_abi_type, 46 to_bytes, 47 to_int, 48 to_hex, 49 to_text, 50 ) 51 52 53 def get_default_modules(): 54 return { 55 "eth": Eth, 56 "shh": Shh, 57 "net": Net, 58 "personal": Personal, 59 "version": Version, 60 "txpool": TxPool, 61 "miner": Miner, 62 "admin": Admin, 63 "testing": Testing, 64 } 65 66 67 class Web3(object): 68 # Providers 69 HTTPProvider = HTTPProvider 70 IPCProvider = IPCProvider 71 TestRPCProvider = TestRPCProvider 72 EthereumTesterProvider = EthereumTesterProvider 73 74 # Managers 75 RequestManager = RequestManager 76 77 # Iban 78 Iban = Iban 79 80 # Encoding and Decoding 81 toBytes = staticmethod(to_bytes) 82 toInt = staticmethod(to_int) 83 toHex = staticmethod(to_hex) 84 toText = staticmethod(to_text) 85 86 # Currency Utility 87 toWei = staticmethod(to_wei) 88 fromWei = staticmethod(from_wei) 89 90 # Address Utility 91 isAddress = staticmethod(is_address) 92 isChecksumAddress = staticmethod(is_checksum_address) 93 toChecksumAddress = staticmethod(to_checksum_address) 94 95 def __init__(self, providers, middlewares=None, modules=None): 96 self.manager = RequestManager(self, providers, middlewares) 97 98 if modules is None: 99 modules = get_default_modules() 100 101 for module_name, module_class in modules.items(): 102 module_class.attach(self, module_name) 103 104 @property 105 def middleware_stack(self): 106 return self.manager.middleware_stack 107 108 @property 109 def providers(self): 110 return self.manager.providers 111 112 def setProviders(self, providers): 113 self.manager.setProvider(providers) 114 115 @staticmethod 116 @apply_to_return_value(HexBytes) 117 def sha3(primitive=None, text=None, hexstr=None): 118 if isinstance(primitive, (bytes, int, type(None))): 119 input_bytes = to_bytes(primitive, hexstr=hexstr, text=text) 120 return keccak(input_bytes) 121 122 raise TypeError( 123 "You called sha3 with first arg %r and keywords %r. You must call it with one of " 124 "these approaches: sha3(text='txt'), sha3(hexstr='0x747874'), " 125 "sha3(b'\\x74\\x78\\x74'), or sha3(0x747874)." 
% ( 126 primitive, 127 {'text': text, 'hexstr': hexstr} 128 ) 129 ) 130 131 @classmethod 132 def soliditySha3(cls, abi_types, values): 133 """ 134 Executes sha3 (keccak256) exactly as Solidity does. 135 Takes list of abi_types as inputs -- `[uint24, int8[], bool]` 136 and list of corresponding values -- `[20, [-1, 5, 0], True]` 137 """ 138 if len(abi_types) != len(values): 139 raise ValueError( 140 "Length mismatch between provided abi types and values. Got " 141 "{0} types and {1} values.".format(len(abi_types), len(values)) 142 ) 143 144 hex_string = add_0x_prefix(''.join( 145 remove_0x_prefix(hex_encode_abi_type(abi_type, value)) 146 for abi_type, value 147 in zip(abi_types, values) 148 )) 149 return cls.sha3(hexstr=hex_string) 150 151 def isConnected(self): 152 for provider in self.providers: 153 if provider.isConnected(): 154 return True 155 else: 156 return False 157 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/web3/main.py b/web3/main.py --- a/web3/main.py +++ b/web3/main.py @@ -18,7 +18,6 @@ from web3.miner import Miner from web3.net import Net from web3.personal import Personal -from web3.shh import Shh from web3.testing import Testing from web3.txpool import TxPool from web3.version import Version @@ -53,7 +52,6 @@ def get_default_modules(): return { "eth": Eth, - "shh": Shh, "net": Net, "personal": Personal, "version": Version,
{"golden_diff": "diff --git a/web3/main.py b/web3/main.py\n--- a/web3/main.py\n+++ b/web3/main.py\n@@ -18,7 +18,6 @@\n from web3.miner import Miner\n from web3.net import Net\n from web3.personal import Personal\n-from web3.shh import Shh\n from web3.testing import Testing\n from web3.txpool import TxPool\n from web3.version import Version\n@@ -53,7 +52,6 @@\n def get_default_modules():\n return {\n \"eth\": Eth,\n- \"shh\": Shh,\n \"net\": Net,\n \"personal\": Personal,\n \"version\": Version,\n", "issue": "Remove shh from default list of modules\nThe whisper protocol is not standardized enough to be in the default list.\r\n\r\nAlso, note in the docs the current fractured nature of whisper.\r\n\r\nSee #384 \n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom eth_utils import (\n apply_to_return_value,\n add_0x_prefix,\n from_wei,\n is_address,\n is_checksum_address,\n keccak,\n remove_0x_prefix,\n to_checksum_address,\n to_wei,\n)\n\nfrom web3.admin import Admin\nfrom web3.eth import Eth\nfrom web3.iban import Iban\nfrom web3.miner import Miner\nfrom web3.net import Net\nfrom web3.personal import Personal\nfrom web3.shh import Shh\nfrom web3.testing import Testing\nfrom web3.txpool import TxPool\nfrom web3.version import Version\n\nfrom web3.providers.ipc import (\n IPCProvider,\n)\nfrom web3.providers.rpc import (\n HTTPProvider,\n)\nfrom web3.providers.tester import (\n TestRPCProvider,\n EthereumTesterProvider,\n)\n\nfrom web3.manager import (\n RequestManager,\n)\n\nfrom web3.utils.datastructures import (\n HexBytes,\n)\nfrom web3.utils.encoding import (\n hex_encode_abi_type,\n to_bytes,\n to_int,\n to_hex,\n to_text,\n)\n\n\ndef get_default_modules():\n return {\n \"eth\": Eth,\n \"shh\": Shh,\n \"net\": Net,\n \"personal\": Personal,\n \"version\": Version,\n \"txpool\": TxPool,\n \"miner\": Miner,\n \"admin\": Admin,\n \"testing\": Testing,\n }\n\n\nclass Web3(object):\n # Providers\n HTTPProvider = HTTPProvider\n IPCProvider = IPCProvider\n TestRPCProvider = TestRPCProvider\n EthereumTesterProvider = EthereumTesterProvider\n\n # Managers\n RequestManager = RequestManager\n\n # Iban\n Iban = Iban\n\n # Encoding and Decoding\n toBytes = staticmethod(to_bytes)\n toInt = staticmethod(to_int)\n toHex = staticmethod(to_hex)\n toText = staticmethod(to_text)\n\n # Currency Utility\n toWei = staticmethod(to_wei)\n fromWei = staticmethod(from_wei)\n\n # Address Utility\n isAddress = staticmethod(is_address)\n isChecksumAddress = staticmethod(is_checksum_address)\n toChecksumAddress = staticmethod(to_checksum_address)\n\n def __init__(self, providers, middlewares=None, modules=None):\n self.manager = RequestManager(self, providers, middlewares)\n\n if modules is None:\n modules = get_default_modules()\n\n for module_name, module_class in modules.items():\n module_class.attach(self, module_name)\n\n @property\n def middleware_stack(self):\n return self.manager.middleware_stack\n\n @property\n def providers(self):\n return self.manager.providers\n\n def setProviders(self, providers):\n self.manager.setProvider(providers)\n\n @staticmethod\n @apply_to_return_value(HexBytes)\n def sha3(primitive=None, text=None, hexstr=None):\n if isinstance(primitive, (bytes, int, type(None))):\n input_bytes = to_bytes(primitive, hexstr=hexstr, text=text)\n return keccak(input_bytes)\n\n raise TypeError(\n \"You called sha3 with first arg %r and keywords %r. 
You must call it with one of \"\n \"these approaches: sha3(text='txt'), sha3(hexstr='0x747874'), \"\n \"sha3(b'\\\\x74\\\\x78\\\\x74'), or sha3(0x747874).\" % (\n primitive,\n {'text': text, 'hexstr': hexstr}\n )\n )\n\n @classmethod\n def soliditySha3(cls, abi_types, values):\n \"\"\"\n Executes sha3 (keccak256) exactly as Solidity does.\n Takes list of abi_types as inputs -- `[uint24, int8[], bool]`\n and list of corresponding values -- `[20, [-1, 5, 0], True]`\n \"\"\"\n if len(abi_types) != len(values):\n raise ValueError(\n \"Length mismatch between provided abi types and values. Got \"\n \"{0} types and {1} values.\".format(len(abi_types), len(values))\n )\n\n hex_string = add_0x_prefix(''.join(\n remove_0x_prefix(hex_encode_abi_type(abi_type, value))\n for abi_type, value\n in zip(abi_types, values)\n ))\n return cls.sha3(hexstr=hex_string)\n\n def isConnected(self):\n for provider in self.providers:\n if provider.isConnected():\n return True\n else:\n return False\n", "path": "web3/main.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom eth_utils import (\n apply_to_return_value,\n add_0x_prefix,\n from_wei,\n is_address,\n is_checksum_address,\n keccak,\n remove_0x_prefix,\n to_checksum_address,\n to_wei,\n)\n\nfrom web3.admin import Admin\nfrom web3.eth import Eth\nfrom web3.iban import Iban\nfrom web3.miner import Miner\nfrom web3.net import Net\nfrom web3.personal import Personal\nfrom web3.testing import Testing\nfrom web3.txpool import TxPool\nfrom web3.version import Version\n\nfrom web3.providers.ipc import (\n IPCProvider,\n)\nfrom web3.providers.rpc import (\n HTTPProvider,\n)\nfrom web3.providers.tester import (\n TestRPCProvider,\n EthereumTesterProvider,\n)\n\nfrom web3.manager import (\n RequestManager,\n)\n\nfrom web3.utils.datastructures import (\n HexBytes,\n)\nfrom web3.utils.encoding import (\n hex_encode_abi_type,\n to_bytes,\n to_int,\n to_hex,\n to_text,\n)\n\n\ndef get_default_modules():\n return {\n \"eth\": Eth,\n \"net\": Net,\n \"personal\": Personal,\n \"version\": Version,\n \"txpool\": TxPool,\n \"miner\": Miner,\n \"admin\": Admin,\n \"testing\": Testing,\n }\n\n\nclass Web3(object):\n # Providers\n HTTPProvider = HTTPProvider\n IPCProvider = IPCProvider\n TestRPCProvider = TestRPCProvider\n EthereumTesterProvider = EthereumTesterProvider\n\n # Managers\n RequestManager = RequestManager\n\n # Iban\n Iban = Iban\n\n # Encoding and Decoding\n toBytes = staticmethod(to_bytes)\n toInt = staticmethod(to_int)\n toHex = staticmethod(to_hex)\n toText = staticmethod(to_text)\n\n # Currency Utility\n toWei = staticmethod(to_wei)\n fromWei = staticmethod(from_wei)\n\n # Address Utility\n isAddress = staticmethod(is_address)\n isChecksumAddress = staticmethod(is_checksum_address)\n toChecksumAddress = staticmethod(to_checksum_address)\n\n def __init__(self, providers, middlewares=None, modules=None):\n self.manager = RequestManager(self, providers, middlewares)\n\n if modules is None:\n modules = get_default_modules()\n\n for module_name, module_class in modules.items():\n module_class.attach(self, module_name)\n\n @property\n def middleware_stack(self):\n return self.manager.middleware_stack\n\n @property\n def providers(self):\n return self.manager.providers\n\n def setProviders(self, providers):\n self.manager.setProvider(providers)\n\n @staticmethod\n @apply_to_return_value(HexBytes)\n def sha3(primitive=None, text=None, hexstr=None):\n if isinstance(primitive, (bytes, int, type(None))):\n input_bytes = to_bytes(primitive, 
hexstr=hexstr, text=text)\n return keccak(input_bytes)\n\n raise TypeError(\n \"You called sha3 with first arg %r and keywords %r. You must call it with one of \"\n \"these approaches: sha3(text='txt'), sha3(hexstr='0x747874'), \"\n \"sha3(b'\\\\x74\\\\x78\\\\x74'), or sha3(0x747874).\" % (\n primitive,\n {'text': text, 'hexstr': hexstr}\n )\n )\n\n @classmethod\n def soliditySha3(cls, abi_types, values):\n \"\"\"\n Executes sha3 (keccak256) exactly as Solidity does.\n Takes list of abi_types as inputs -- `[uint24, int8[], bool]`\n and list of corresponding values -- `[20, [-1, 5, 0], True]`\n \"\"\"\n if len(abi_types) != len(values):\n raise ValueError(\n \"Length mismatch between provided abi types and values. Got \"\n \"{0} types and {1} values.\".format(len(abi_types), len(values))\n )\n\n hex_string = add_0x_prefix(''.join(\n remove_0x_prefix(hex_encode_abi_type(abi_type, value))\n for abi_type, value\n in zip(abi_types, values)\n ))\n return cls.sha3(hexstr=hex_string)\n\n def isConnected(self):\n for provider in self.providers:\n if provider.isConnected():\n return True\n else:\n return False\n", "path": "web3/main.py"}]}
1,668
148
gh_patches_debug_2327
rasdani/github-patches
git_diff
encode__httpx-194
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Missing py.typed declaration? `mypy` is complaining about not being able to find type annotations for `httpx`: `error: Cannot find module named 'httpx'` I'm somewhat new to using type annotations/static type checking in Python, but from the mypy documentation [here](https://mypy.readthedocs.io/en/latest/installed_packages.html#making-pep-561-compatible-packages) it looks like there may be a missing declaration in `setup.py`? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 import os 5 import re 6 7 from setuptools import setup 8 9 10 def get_version(package): 11 """ 12 Return package version as listed in `__version__` in `init.py`. 13 """ 14 with open(os.path.join(package, "__version__.py")) as f: 15 return re.search("__version__ = ['\"]([^'\"]+)['\"]", f.read()).group(1) 16 17 18 def get_long_description(): 19 """ 20 Return the README. 21 """ 22 with open("README.md", encoding="utf8") as f: 23 return f.read() 24 25 26 def get_packages(package): 27 """ 28 Return root package and all sub-packages. 29 """ 30 return [ 31 dirpath 32 for dirpath, dirnames, filenames in os.walk(package) 33 if os.path.exists(os.path.join(dirpath, "__init__.py")) 34 ] 35 36 37 setup( 38 name="httpx", 39 python_requires=">=3.6", 40 version=get_version("httpx"), 41 url="https://github.com/encode/httpx", 42 license="BSD", 43 description="The next generation HTTP client.", 44 long_description=get_long_description(), 45 long_description_content_type="text/markdown", 46 author="Tom Christie", 47 author_email="tom@tomchristie.com", 48 packages=get_packages("httpx"), 49 install_requires=[ 50 "certifi", 51 "chardet==3.*", 52 "h11==0.8.*", 53 "h2==3.*", 54 "hstspreload", 55 "idna==2.*", 56 "rfc3986==1.*", 57 ], 58 classifiers=[ 59 "Development Status :: 3 - Alpha", 60 "Environment :: Web Environment", 61 "Intended Audience :: Developers", 62 "License :: OSI Approved :: BSD License", 63 "Operating System :: OS Independent", 64 "Topic :: Internet :: WWW/HTTP", 65 "Programming Language :: Python :: 3", 66 "Programming Language :: Python :: 3.6", 67 "Programming Language :: Python :: 3.7", 68 "Programming Language :: Python :: 3.8", 69 ], 70 ) 71 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -45,6 +45,7 @@ long_description_content_type="text/markdown", author="Tom Christie", author_email="tom@tomchristie.com", + package_data={"httpx": ["py.typed"]}, packages=get_packages("httpx"), install_requires=[ "certifi",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -45,6 +45,7 @@\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"tom@tomchristie.com\",\n+ package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n install_requires=[\n \"certifi\",\n", "issue": "Missing py.typed declaration?\n`mypy` is complaining about not being able to find type annotations for `httpx`: \r\n\r\n`error: Cannot find module named 'httpx'`\r\n\r\nI'm somewhat new to using type annotations/static type checking in Python, but from the mypy documentation [here](https://mypy.readthedocs.io/en/latest/installed_packages.html#making-pep-561-compatible-packages) it looks like there may be a missing declaration in `setup.py`?\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n with open(os.path.join(package, \"__version__.py\")) as f:\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", f.read()).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n return f.read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"tom@tomchristie.com\",\n packages=get_packages(\"httpx\"),\n install_requires=[\n \"certifi\",\n \"chardet==3.*\",\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"hstspreload\",\n \"idna==2.*\",\n \"rfc3986==1.*\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n with open(os.path.join(package, \"__version__.py\")) as f:\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", f.read()).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n return f.read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n license=\"BSD\",\n description=\"The next generation HTTP 
client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"tom@tomchristie.com\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n install_requires=[\n \"certifi\",\n \"chardet==3.*\",\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"hstspreload\",\n \"idna==2.*\",\n \"rfc3986==1.*\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}]}
947
90
gh_patches_debug_5891
rasdani/github-patches
git_diff
sublimelsp__LSP-1732
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `os.path.relpath` may throw an exception on Windows. `os.path.relpath` may throw an exception on Windows. ``` Traceback (most recent call last): File "C:\tools\sublime\Data\Installed Packages\LSP.sublime-package\plugin/references.py", line 55, in File "C:\tools\sublime\Data\Installed Packages\LSP.sublime-package\plugin/references.py", line 62, in _handle_response File "C:\tools\sublime\Data\Installed Packages\LSP.sublime-package\plugin/references.py", line 85, in _show_references_in_output_panel File "C:\tools\sublime\Data\Installed Packages\LSP.sublime-package\plugin/references.py", line 107, in _get_relative_path File "./python3.3/ntpath.py", line 564, in relpath ValueError: path is on mount 'C:', start on mount '\myserver\myshare' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `plugin/references.py` Content: ``` 1 from .core.panels import ensure_panel 2 from .core.protocol import Location 3 from .core.protocol import Point 4 from .core.protocol import Request 5 from .core.registry import get_position 6 from .core.registry import LspTextCommand 7 from .core.sessions import Session 8 from .core.settings import PLUGIN_NAME 9 from .core.settings import userprefs 10 from .core.types import ClientConfig 11 from .core.types import PANEL_FILE_REGEX 12 from .core.types import PANEL_LINE_REGEX 13 from .core.typing import Dict, List, Optional, Tuple 14 from .core.views import get_line 15 from .core.views import get_uri_and_position_from_location 16 from .core.views import text_document_position_params 17 from .locationpicker import LocationPicker 18 import functools 19 import linecache 20 import os 21 import sublime 22 23 24 def ensure_references_panel(window: sublime.Window) -> Optional[sublime.View]: 25 return ensure_panel(window, "references", PANEL_FILE_REGEX, PANEL_LINE_REGEX, 26 "Packages/" + PLUGIN_NAME + "/Syntaxes/References.sublime-syntax") 27 28 29 class LspSymbolReferencesCommand(LspTextCommand): 30 31 capability = 'referencesProvider' 32 33 def __init__(self, view: sublime.View) -> None: 34 super().__init__(view) 35 self._picker = None # type: Optional[LocationPicker] 36 37 def run(self, _: sublime.Edit, event: Optional[dict] = None, point: Optional[int] = None) -> None: 38 session = self.best_session(self.capability) 39 file_path = self.view.file_name() 40 pos = get_position(self.view, event, point) 41 if session and file_path and pos is not None: 42 params = text_document_position_params(self.view, pos) 43 params['context'] = {"includeDeclaration": False} 44 request = Request("textDocument/references", params, self.view, progress=True) 45 session.send_request( 46 request, 47 functools.partial( 48 self._handle_response_async, 49 self.view.substr(self.view.word(pos)), 50 session 51 ) 52 ) 53 54 def _handle_response_async(self, word: str, session: Session, response: Optional[List[Location]]) -> None: 55 sublime.set_timeout(lambda: self._handle_response(word, session, response)) 56 57 def _handle_response(self, word: str, session: Session, response: Optional[List[Location]]) -> None: 58 if response: 59 if userprefs().show_references_in_quick_panel: 60 self._show_references_in_quick_panel(session, response) 61 else: 62 self._show_references_in_output_panel(word, session, response) 63 else: 64 window = self.view.window() 65 if window: 66 window.status_message("No references 
found") 67 68 def _show_references_in_quick_panel(self, session: Session, locations: List[Location]) -> None: 69 self.view.run_command("add_jump_record", {"selection": [(r.a, r.b) for r in self.view.sel()]}) 70 LocationPicker(self.view, session, locations, side_by_side=False) 71 72 def _show_references_in_output_panel(self, word: str, session: Session, locations: List[Location]) -> None: 73 window = session.window 74 panel = ensure_references_panel(window) 75 if not panel: 76 return 77 manager = session.manager() 78 if not manager: 79 return 80 base_dir = manager.get_project_path(self.view.file_name() or "") 81 to_render = [] # type: List[str] 82 references_count = 0 83 references_by_file = _group_locations_by_uri(window, session.config, locations) 84 for file, references in references_by_file.items(): 85 to_render.append('{}:'.format(_get_relative_path(base_dir, file))) 86 for reference in references: 87 references_count += 1 88 point, line = reference 89 to_render.append('{:>5}:{:<4} {}'.format(point.row + 1, point.col + 1, line)) 90 to_render.append("") # add spacing between filenames 91 characters = "\n".join(to_render) 92 panel.settings().set("result_base_dir", base_dir) 93 panel.run_command("lsp_clear_panel") 94 window.run_command("show_panel", {"panel": "output.references"}) 95 panel.run_command('append', { 96 'characters': "{} references for '{}'\n\n{}".format(references_count, word, characters), 97 'force': True, 98 'scroll_to_end': False 99 }) 100 # highlight all word occurrences 101 regions = panel.find_all(r"\b{}\b".format(word)) 102 panel.add_regions('ReferenceHighlight', regions, 'comment', flags=sublime.DRAW_OUTLINED) 103 104 105 def _get_relative_path(base_dir: Optional[str], file_path: str) -> str: 106 if base_dir: 107 return os.path.relpath(file_path, base_dir) 108 else: 109 return file_path 110 111 112 def _group_locations_by_uri( 113 window: sublime.Window, 114 config: ClientConfig, 115 locations: List[Location] 116 ) -> Dict[str, List[Tuple[Point, str]]]: 117 """Return a dictionary that groups locations by the URI it belongs.""" 118 grouped_locations = {} # type: Dict[str, List[Tuple[Point, str]]] 119 for location in locations: 120 uri, position = get_uri_and_position_from_location(location) 121 file_path = config.map_server_uri_to_client_path(uri) 122 point = Point.from_lsp(position) 123 # get line of the reference, to showcase its use 124 reference_line = get_line(window, file_path, point.row) 125 if grouped_locations.get(file_path) is None: 126 grouped_locations[file_path] = [] 127 grouped_locations[file_path].append((point, reference_line)) 128 # we don't want to cache the line, we always want to get fresh data 129 linecache.clearcache() 130 return grouped_locations 131 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/plugin/references.py b/plugin/references.py --- a/plugin/references.py +++ b/plugin/references.py @@ -104,9 +104,12 @@ def _get_relative_path(base_dir: Optional[str], file_path: str) -> str: if base_dir: - return os.path.relpath(file_path, base_dir) - else: - return file_path + try: + return os.path.relpath(file_path, base_dir) + except ValueError: + # On Windows, ValueError is raised when path and start are on different drives. + pass + return file_path def _group_locations_by_uri(
{"golden_diff": "diff --git a/plugin/references.py b/plugin/references.py\n--- a/plugin/references.py\n+++ b/plugin/references.py\n@@ -104,9 +104,12 @@\n \n def _get_relative_path(base_dir: Optional[str], file_path: str) -> str:\n if base_dir:\n- return os.path.relpath(file_path, base_dir)\n- else:\n- return file_path\n+ try:\n+ return os.path.relpath(file_path, base_dir)\n+ except ValueError:\n+ # On Windows, ValueError is raised when path and start are on different drives.\n+ pass\n+ return file_path\n \n \n def _group_locations_by_uri(\n", "issue": "`os.path.relpath` may throw an exception on Windows.\n`os.path.relpath` may throw an exception on Windows.\r\n\r\n```\r\nTraceback (most recent call last):\r\nFile \"C:\\tools\\sublime\\Data\\Installed Packages\\LSP.sublime-package\\plugin/references.py\", line 55, in \r\nFile \"C:\\tools\\sublime\\Data\\Installed Packages\\LSP.sublime-package\\plugin/references.py\", line 62, in _handle_response\r\nFile \"C:\\tools\\sublime\\Data\\Installed Packages\\LSP.sublime-package\\plugin/references.py\", line 85, in _show_references_in_output_panel\r\nFile \"C:\\tools\\sublime\\Data\\Installed Packages\\LSP.sublime-package\\plugin/references.py\", line 107, in _get_relative_path\r\nFile \"./python3.3/ntpath.py\", line 564, in relpath\r\nValueError: path is on mount 'C:', start on mount '\\myserver\\myshare'\r\n```\n", "before_files": [{"content": "from .core.panels import ensure_panel\nfrom .core.protocol import Location\nfrom .core.protocol import Point\nfrom .core.protocol import Request\nfrom .core.registry import get_position\nfrom .core.registry import LspTextCommand\nfrom .core.sessions import Session\nfrom .core.settings import PLUGIN_NAME\nfrom .core.settings import userprefs\nfrom .core.types import ClientConfig\nfrom .core.types import PANEL_FILE_REGEX\nfrom .core.types import PANEL_LINE_REGEX\nfrom .core.typing import Dict, List, Optional, Tuple\nfrom .core.views import get_line\nfrom .core.views import get_uri_and_position_from_location\nfrom .core.views import text_document_position_params\nfrom .locationpicker import LocationPicker\nimport functools\nimport linecache\nimport os\nimport sublime\n\n\ndef ensure_references_panel(window: sublime.Window) -> Optional[sublime.View]:\n return ensure_panel(window, \"references\", PANEL_FILE_REGEX, PANEL_LINE_REGEX,\n \"Packages/\" + PLUGIN_NAME + \"/Syntaxes/References.sublime-syntax\")\n\n\nclass LspSymbolReferencesCommand(LspTextCommand):\n\n capability = 'referencesProvider'\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self._picker = None # type: Optional[LocationPicker]\n\n def run(self, _: sublime.Edit, event: Optional[dict] = None, point: Optional[int] = None) -> None:\n session = self.best_session(self.capability)\n file_path = self.view.file_name()\n pos = get_position(self.view, event, point)\n if session and file_path and pos is not None:\n params = text_document_position_params(self.view, pos)\n params['context'] = {\"includeDeclaration\": False}\n request = Request(\"textDocument/references\", params, self.view, progress=True)\n session.send_request(\n request,\n functools.partial(\n self._handle_response_async,\n self.view.substr(self.view.word(pos)),\n session\n )\n )\n\n def _handle_response_async(self, word: str, session: Session, response: Optional[List[Location]]) -> None:\n sublime.set_timeout(lambda: self._handle_response(word, session, response))\n\n def _handle_response(self, word: str, session: Session, response: Optional[List[Location]]) 
-> None:\n if response:\n if userprefs().show_references_in_quick_panel:\n self._show_references_in_quick_panel(session, response)\n else:\n self._show_references_in_output_panel(word, session, response)\n else:\n window = self.view.window()\n if window:\n window.status_message(\"No references found\")\n\n def _show_references_in_quick_panel(self, session: Session, locations: List[Location]) -> None:\n self.view.run_command(\"add_jump_record\", {\"selection\": [(r.a, r.b) for r in self.view.sel()]})\n LocationPicker(self.view, session, locations, side_by_side=False)\n\n def _show_references_in_output_panel(self, word: str, session: Session, locations: List[Location]) -> None:\n window = session.window\n panel = ensure_references_panel(window)\n if not panel:\n return\n manager = session.manager()\n if not manager:\n return\n base_dir = manager.get_project_path(self.view.file_name() or \"\")\n to_render = [] # type: List[str]\n references_count = 0\n references_by_file = _group_locations_by_uri(window, session.config, locations)\n for file, references in references_by_file.items():\n to_render.append('{}:'.format(_get_relative_path(base_dir, file)))\n for reference in references:\n references_count += 1\n point, line = reference\n to_render.append('{:>5}:{:<4} {}'.format(point.row + 1, point.col + 1, line))\n to_render.append(\"\") # add spacing between filenames\n characters = \"\\n\".join(to_render)\n panel.settings().set(\"result_base_dir\", base_dir)\n panel.run_command(\"lsp_clear_panel\")\n window.run_command(\"show_panel\", {\"panel\": \"output.references\"})\n panel.run_command('append', {\n 'characters': \"{} references for '{}'\\n\\n{}\".format(references_count, word, characters),\n 'force': True,\n 'scroll_to_end': False\n })\n # highlight all word occurrences\n regions = panel.find_all(r\"\\b{}\\b\".format(word))\n panel.add_regions('ReferenceHighlight', regions, 'comment', flags=sublime.DRAW_OUTLINED)\n\n\ndef _get_relative_path(base_dir: Optional[str], file_path: str) -> str:\n if base_dir:\n return os.path.relpath(file_path, base_dir)\n else:\n return file_path\n\n\ndef _group_locations_by_uri(\n window: sublime.Window,\n config: ClientConfig,\n locations: List[Location]\n) -> Dict[str, List[Tuple[Point, str]]]:\n \"\"\"Return a dictionary that groups locations by the URI it belongs.\"\"\"\n grouped_locations = {} # type: Dict[str, List[Tuple[Point, str]]]\n for location in locations:\n uri, position = get_uri_and_position_from_location(location)\n file_path = config.map_server_uri_to_client_path(uri)\n point = Point.from_lsp(position)\n # get line of the reference, to showcase its use\n reference_line = get_line(window, file_path, point.row)\n if grouped_locations.get(file_path) is None:\n grouped_locations[file_path] = []\n grouped_locations[file_path].append((point, reference_line))\n # we don't want to cache the line, we always want to get fresh data\n linecache.clearcache()\n return grouped_locations\n", "path": "plugin/references.py"}], "after_files": [{"content": "from .core.panels import ensure_panel\nfrom .core.protocol import Location\nfrom .core.protocol import Point\nfrom .core.protocol import Request\nfrom .core.registry import get_position\nfrom .core.registry import LspTextCommand\nfrom .core.sessions import Session\nfrom .core.settings import PLUGIN_NAME\nfrom .core.settings import userprefs\nfrom .core.types import ClientConfig\nfrom .core.types import PANEL_FILE_REGEX\nfrom .core.types import PANEL_LINE_REGEX\nfrom .core.typing import Dict, List, Optional, 
Tuple\nfrom .core.views import get_line\nfrom .core.views import get_uri_and_position_from_location\nfrom .core.views import text_document_position_params\nfrom .locationpicker import LocationPicker\nimport functools\nimport linecache\nimport os\nimport sublime\n\n\ndef ensure_references_panel(window: sublime.Window) -> Optional[sublime.View]:\n return ensure_panel(window, \"references\", PANEL_FILE_REGEX, PANEL_LINE_REGEX,\n \"Packages/\" + PLUGIN_NAME + \"/Syntaxes/References.sublime-syntax\")\n\n\nclass LspSymbolReferencesCommand(LspTextCommand):\n\n capability = 'referencesProvider'\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self._picker = None # type: Optional[LocationPicker]\n\n def run(self, _: sublime.Edit, event: Optional[dict] = None, point: Optional[int] = None) -> None:\n session = self.best_session(self.capability)\n file_path = self.view.file_name()\n pos = get_position(self.view, event, point)\n if session and file_path and pos is not None:\n params = text_document_position_params(self.view, pos)\n params['context'] = {\"includeDeclaration\": False}\n request = Request(\"textDocument/references\", params, self.view, progress=True)\n session.send_request(\n request,\n functools.partial(\n self._handle_response_async,\n self.view.substr(self.view.word(pos)),\n session\n )\n )\n\n def _handle_response_async(self, word: str, session: Session, response: Optional[List[Location]]) -> None:\n sublime.set_timeout(lambda: self._handle_response(word, session, response))\n\n def _handle_response(self, word: str, session: Session, response: Optional[List[Location]]) -> None:\n if response:\n if userprefs().show_references_in_quick_panel:\n self._show_references_in_quick_panel(session, response)\n else:\n self._show_references_in_output_panel(word, session, response)\n else:\n window = self.view.window()\n if window:\n window.status_message(\"No references found\")\n\n def _show_references_in_quick_panel(self, session: Session, locations: List[Location]) -> None:\n self.view.run_command(\"add_jump_record\", {\"selection\": [(r.a, r.b) for r in self.view.sel()]})\n LocationPicker(self.view, session, locations, side_by_side=False)\n\n def _show_references_in_output_panel(self, word: str, session: Session, locations: List[Location]) -> None:\n window = session.window\n panel = ensure_references_panel(window)\n if not panel:\n return\n manager = session.manager()\n if not manager:\n return\n base_dir = manager.get_project_path(self.view.file_name() or \"\")\n to_render = [] # type: List[str]\n references_count = 0\n references_by_file = _group_locations_by_uri(window, session.config, locations)\n for file, references in references_by_file.items():\n to_render.append('{}:'.format(_get_relative_path(base_dir, file)))\n for reference in references:\n references_count += 1\n point, line = reference\n to_render.append('{:>5}:{:<4} {}'.format(point.row + 1, point.col + 1, line))\n to_render.append(\"\") # add spacing between filenames\n characters = \"\\n\".join(to_render)\n panel.settings().set(\"result_base_dir\", base_dir)\n panel.run_command(\"lsp_clear_panel\")\n window.run_command(\"show_panel\", {\"panel\": \"output.references\"})\n panel.run_command('append', {\n 'characters': \"{} references for '{}'\\n\\n{}\".format(references_count, word, characters),\n 'force': True,\n 'scroll_to_end': False\n })\n # highlight all word occurrences\n regions = panel.find_all(r\"\\b{}\\b\".format(word))\n panel.add_regions('ReferenceHighlight', regions, 'comment', 
flags=sublime.DRAW_OUTLINED)\n\n\ndef _get_relative_path(base_dir: Optional[str], file_path: str) -> str:\n if base_dir:\n try:\n return os.path.relpath(file_path, base_dir)\n except ValueError:\n # On Windows, ValueError is raised when path and start are on different drives.\n pass\n return file_path\n\n\ndef _group_locations_by_uri(\n window: sublime.Window,\n config: ClientConfig,\n locations: List[Location]\n) -> Dict[str, List[Tuple[Point, str]]]:\n \"\"\"Return a dictionary that groups locations by the URI it belongs.\"\"\"\n grouped_locations = {} # type: Dict[str, List[Tuple[Point, str]]]\n for location in locations:\n uri, position = get_uri_and_position_from_location(location)\n file_path = config.map_server_uri_to_client_path(uri)\n point = Point.from_lsp(position)\n # get line of the reference, to showcase its use\n reference_line = get_line(window, file_path, point.row)\n if grouped_locations.get(file_path) is None:\n grouped_locations[file_path] = []\n grouped_locations[file_path].append((point, reference_line))\n # we don't want to cache the line, we always want to get fresh data\n linecache.clearcache()\n return grouped_locations\n", "path": "plugin/references.py"}]}
1,973
149
gh_patches_debug_7263
rasdani/github-patches
git_diff
iterative__dvc-5753
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- exp show: failing with rich==10.0.0 ```console $ dvc exp show dvc exp show -v 2021-03-29 11:30:45,071 DEBUG: Check for update is disabled. 2021-03-29 11:30:46,006 ERROR: unexpected error - 'int' object has no attribute 'max_width' ------------------------------------------------------------ Traceback (most recent call last): File "/home/saugat/repos/iterative/dvc/dvc/main.py", line 55, in main ret = cmd.run() File "/home/saugat/repos/iterative/dvc/dvc/command/experiments.py", line 411, in run measurement = table.__rich_measure__(console, SHOW_MAX_WIDTH) File "/home/saugat/venvs/dvc/env39/lib/python3.9/site-packages/rich/table.py", line 287, in __rich_measure__ max_width = options.max_width AttributeError: 'int' object has no attribute 'max_width' ------------------------------------------------------------ 2021-03-29 11:30:47,022 DEBUG: Version info for developers: DVC version: 2.0.11+f8c567 --------------------------------- Platform: Python 3.9.2 on Linux-5.11.8-arch1-1-x86_64-with-glibc2.33 Supports: All remotes Cache types: hardlink, symlink Cache directory: ext4 on /dev/sda9 Caches: local Remotes: https Workspace directory: ext4 on /dev/sda9 Repo: dvc, git Having any troubles? Hit us up at https://dvc.org/support, we are always happy to help! ``` This is also breaking our linter ([here](https://github.com/iterative/dvc/runs/2214172187?check_suite_focus=true#step:7:250 )) and tests as well due to the change in rich's internal API that we are using: https://github.com/iterative/dvc/blob/1a25ebe3bd2eda4c3612e408fb503d64490fb56c/dvc/utils/table.py#L59 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dvc/utils/table.py` Content: ``` 1 from dataclasses import dataclass 2 from typing import TYPE_CHECKING, List, cast 3 4 from rich.style import StyleType 5 from rich.table import Column as RichColumn 6 from rich.table import Table as RichTable 7 8 if TYPE_CHECKING: 9 from rich.console import ( 10 Console, 11 ConsoleOptions, 12 JustifyMethod, 13 OverflowMethod, 14 RenderableType, 15 ) 16 17 18 @dataclass 19 class Column(RichColumn): 20 collapse: bool = False 21 22 23 class Table(RichTable): 24 def add_column( # pylint: disable=arguments-differ 25 self, 26 header: "RenderableType" = "", 27 footer: "RenderableType" = "", 28 *, 29 header_style: StyleType = None, 30 footer_style: StyleType = None, 31 style: StyleType = None, 32 justify: "JustifyMethod" = "left", 33 overflow: "OverflowMethod" = "ellipsis", 34 width: int = None, 35 min_width: int = None, 36 max_width: int = None, 37 ratio: int = None, 38 no_wrap: bool = False, 39 collapse: bool = False, 40 ) -> None: 41 column = Column( # type: ignore[call-arg] 42 _index=len(self.columns), 43 header=header, 44 footer=footer, 45 header_style=header_style or "", 46 footer_style=footer_style or "", 47 style=style or "", 48 justify=justify, 49 overflow=overflow, 50 width=width, 51 min_width=min_width, 52 max_width=max_width, 53 ratio=ratio, 54 no_wrap=no_wrap, 55 collapse=collapse, 56 ) 57 self.columns.append(column) 58 59 def _calculate_column_widths( 60 self, console: "Console", options: "ConsoleOptions" 61 ) -> List[int]: 62 """Calculate the widths of each column, including padding, not 63 including borders. 64 65 Adjacent collapsed columns will be removed until there is only a single 66 truncated column remaining. 
67 """ 68 widths = super()._calculate_column_widths(console, options) 69 last_collapsed = -1 70 columns = cast(List[Column], self.columns) 71 for i in range(len(columns) - 1, -1, -1): 72 if widths[i] == 1 and columns[i].collapse: 73 if last_collapsed >= 0: 74 del widths[last_collapsed] 75 del columns[last_collapsed] 76 if self.box: 77 options.max_width += 1 78 for column in columns[last_collapsed:]: 79 column._index -= 1 80 last_collapsed = i 81 padding = self._get_padding_width(i) 82 if ( 83 columns[i].overflow == "ellipsis" 84 and (sum(widths) + padding) <= options.max_width 85 ): 86 # Set content width to 1 (plus padding) if we can fit a 87 # single unicode ellipsis in this column 88 widths[i] = 1 + padding 89 else: 90 last_collapsed = -1 91 return widths 92 93 def _collapse_widths( # type: ignore[override] 94 self, widths: List[int], wrapable: List[bool], max_width: int, 95 ) -> List[int]: 96 """Collapse columns right-to-left if possible to fit table into 97 max_width. 98 99 If table is still too wide after collapsing, rich's automatic overflow 100 handling will be used. 101 """ 102 columns = cast(List[Column], self.columns) 103 collapsible = [column.collapse for column in columns] 104 total_width = sum(widths) 105 excess_width = total_width - max_width 106 if any(collapsible): 107 for i in range(len(widths) - 1, -1, -1): 108 if collapsible[i]: 109 total_width -= widths[i] 110 excess_width -= widths[i] 111 widths[i] = 0 112 if excess_width <= 0: 113 break 114 return super()._collapse_widths(widths, wrapable, max_width) 115 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dvc/utils/table.py b/dvc/utils/table.py --- a/dvc/utils/table.py +++ b/dvc/utils/table.py @@ -69,7 +69,7 @@ last_collapsed = -1 columns = cast(List[Column], self.columns) for i in range(len(columns) - 1, -1, -1): - if widths[i] == 1 and columns[i].collapse: + if widths[i] == 0 and columns[i].collapse: if last_collapsed >= 0: del widths[last_collapsed] del columns[last_collapsed]
{"golden_diff": "diff --git a/dvc/utils/table.py b/dvc/utils/table.py\n--- a/dvc/utils/table.py\n+++ b/dvc/utils/table.py\n@@ -69,7 +69,7 @@\n last_collapsed = -1\n columns = cast(List[Column], self.columns)\n for i in range(len(columns) - 1, -1, -1):\n- if widths[i] == 1 and columns[i].collapse:\n+ if widths[i] == 0 and columns[i].collapse:\n if last_collapsed >= 0:\n del widths[last_collapsed]\n del columns[last_collapsed]\n", "issue": "exp show: failing with rich==10.0.0\n```console\r\n$ dvc exp show\r\ndvc exp show -v\r\n2021-03-29 11:30:45,071 DEBUG: Check for update is disabled.\r\n2021-03-29 11:30:46,006 ERROR: unexpected error - 'int' object has no attribute 'max_width'\r\n------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/home/saugat/repos/iterative/dvc/dvc/main.py\", line 55, in main\r\n ret = cmd.run()\r\n File \"/home/saugat/repos/iterative/dvc/dvc/command/experiments.py\", line 411, in run\r\n measurement = table.__rich_measure__(console, SHOW_MAX_WIDTH)\r\n File \"/home/saugat/venvs/dvc/env39/lib/python3.9/site-packages/rich/table.py\", line 287, in __rich_measure__\r\n max_width = options.max_width\r\nAttributeError: 'int' object has no attribute 'max_width'\r\n------------------------------------------------------------\r\n2021-03-29 11:30:47,022 DEBUG: Version info for developers:\r\nDVC version: 2.0.11+f8c567 \r\n---------------------------------\r\nPlatform: Python 3.9.2 on Linux-5.11.8-arch1-1-x86_64-with-glibc2.33\r\nSupports: All remotes\r\nCache types: hardlink, symlink\r\nCache directory: ext4 on /dev/sda9\r\nCaches: local\r\nRemotes: https\r\nWorkspace directory: ext4 on /dev/sda9\r\nRepo: dvc, git\r\n\r\nHaving any troubles? Hit us up at https://dvc.org/support, we are always happy to help!\r\n```\r\n\r\n\r\nThis is also breaking our linter ([here](https://github.com/iterative/dvc/runs/2214172187?check_suite_focus=true#step:7:250\r\n)) and tests as well due to the change in rich's internal API that we are using:\r\nhttps://github.com/iterative/dvc/blob/1a25ebe3bd2eda4c3612e408fb503d64490fb56c/dvc/utils/table.py#L59\r\n\r\n\n", "before_files": [{"content": "from dataclasses import dataclass\nfrom typing import TYPE_CHECKING, List, cast\n\nfrom rich.style import StyleType\nfrom rich.table import Column as RichColumn\nfrom rich.table import Table as RichTable\n\nif TYPE_CHECKING:\n from rich.console import (\n Console,\n ConsoleOptions,\n JustifyMethod,\n OverflowMethod,\n RenderableType,\n )\n\n\n@dataclass\nclass Column(RichColumn):\n collapse: bool = False\n\n\nclass Table(RichTable):\n def add_column( # pylint: disable=arguments-differ\n self,\n header: \"RenderableType\" = \"\",\n footer: \"RenderableType\" = \"\",\n *,\n header_style: StyleType = None,\n footer_style: StyleType = None,\n style: StyleType = None,\n justify: \"JustifyMethod\" = \"left\",\n overflow: \"OverflowMethod\" = \"ellipsis\",\n width: int = None,\n min_width: int = None,\n max_width: int = None,\n ratio: int = None,\n no_wrap: bool = False,\n collapse: bool = False,\n ) -> None:\n column = Column( # type: ignore[call-arg]\n _index=len(self.columns),\n header=header,\n footer=footer,\n header_style=header_style or \"\",\n footer_style=footer_style or \"\",\n style=style or \"\",\n justify=justify,\n overflow=overflow,\n width=width,\n min_width=min_width,\n max_width=max_width,\n ratio=ratio,\n no_wrap=no_wrap,\n collapse=collapse,\n )\n self.columns.append(column)\n\n def _calculate_column_widths(\n self, console: \"Console\", 
options: \"ConsoleOptions\"\n ) -> List[int]:\n \"\"\"Calculate the widths of each column, including padding, not\n including borders.\n\n Adjacent collapsed columns will be removed until there is only a single\n truncated column remaining.\n \"\"\"\n widths = super()._calculate_column_widths(console, options)\n last_collapsed = -1\n columns = cast(List[Column], self.columns)\n for i in range(len(columns) - 1, -1, -1):\n if widths[i] == 1 and columns[i].collapse:\n if last_collapsed >= 0:\n del widths[last_collapsed]\n del columns[last_collapsed]\n if self.box:\n options.max_width += 1\n for column in columns[last_collapsed:]:\n column._index -= 1\n last_collapsed = i\n padding = self._get_padding_width(i)\n if (\n columns[i].overflow == \"ellipsis\"\n and (sum(widths) + padding) <= options.max_width\n ):\n # Set content width to 1 (plus padding) if we can fit a\n # single unicode ellipsis in this column\n widths[i] = 1 + padding\n else:\n last_collapsed = -1\n return widths\n\n def _collapse_widths( # type: ignore[override]\n self, widths: List[int], wrapable: List[bool], max_width: int,\n ) -> List[int]:\n \"\"\"Collapse columns right-to-left if possible to fit table into\n max_width.\n\n If table is still too wide after collapsing, rich's automatic overflow\n handling will be used.\n \"\"\"\n columns = cast(List[Column], self.columns)\n collapsible = [column.collapse for column in columns]\n total_width = sum(widths)\n excess_width = total_width - max_width\n if any(collapsible):\n for i in range(len(widths) - 1, -1, -1):\n if collapsible[i]:\n total_width -= widths[i]\n excess_width -= widths[i]\n widths[i] = 0\n if excess_width <= 0:\n break\n return super()._collapse_widths(widths, wrapable, max_width)\n", "path": "dvc/utils/table.py"}], "after_files": [{"content": "from dataclasses import dataclass\nfrom typing import TYPE_CHECKING, List, cast\n\nfrom rich.style import StyleType\nfrom rich.table import Column as RichColumn\nfrom rich.table import Table as RichTable\n\nif TYPE_CHECKING:\n from rich.console import (\n Console,\n ConsoleOptions,\n JustifyMethod,\n OverflowMethod,\n RenderableType,\n )\n\n\n@dataclass\nclass Column(RichColumn):\n collapse: bool = False\n\n\nclass Table(RichTable):\n def add_column( # pylint: disable=arguments-differ\n self,\n header: \"RenderableType\" = \"\",\n footer: \"RenderableType\" = \"\",\n *,\n header_style: StyleType = None,\n footer_style: StyleType = None,\n style: StyleType = None,\n justify: \"JustifyMethod\" = \"left\",\n overflow: \"OverflowMethod\" = \"ellipsis\",\n width: int = None,\n min_width: int = None,\n max_width: int = None,\n ratio: int = None,\n no_wrap: bool = False,\n collapse: bool = False,\n ) -> None:\n column = Column( # type: ignore[call-arg]\n _index=len(self.columns),\n header=header,\n footer=footer,\n header_style=header_style or \"\",\n footer_style=footer_style or \"\",\n style=style or \"\",\n justify=justify,\n overflow=overflow,\n width=width,\n min_width=min_width,\n max_width=max_width,\n ratio=ratio,\n no_wrap=no_wrap,\n collapse=collapse,\n )\n self.columns.append(column)\n\n def _calculate_column_widths(\n self, console: \"Console\", options: \"ConsoleOptions\"\n ) -> List[int]:\n \"\"\"Calculate the widths of each column, including padding, not\n including borders.\n\n Adjacent collapsed columns will be removed until there is only a single\n truncated column remaining.\n \"\"\"\n widths = super()._calculate_column_widths(console, options)\n last_collapsed = -1\n columns = cast(List[Column], 
self.columns)\n for i in range(len(columns) - 1, -1, -1):\n if widths[i] == 0 and columns[i].collapse:\n if last_collapsed >= 0:\n del widths[last_collapsed]\n del columns[last_collapsed]\n if self.box:\n options.max_width += 1\n for column in columns[last_collapsed:]:\n column._index -= 1\n last_collapsed = i\n padding = self._get_padding_width(i)\n if (\n columns[i].overflow == \"ellipsis\"\n and (sum(widths) + padding) <= options.max_width\n ):\n # Set content width to 1 (plus padding) if we can fit a\n # single unicode ellipsis in this column\n widths[i] = 1 + padding\n else:\n last_collapsed = -1\n return widths\n\n def _collapse_widths( # type: ignore[override]\n self, widths: List[int], wrapable: List[bool], max_width: int,\n ) -> List[int]:\n \"\"\"Collapse columns right-to-left if possible to fit table into\n max_width.\n\n If table is still too wide after collapsing, rich's automatic overflow\n handling will be used.\n \"\"\"\n columns = cast(List[Column], self.columns)\n collapsible = [column.collapse for column in columns]\n total_width = sum(widths)\n excess_width = total_width - max_width\n if any(collapsible):\n for i in range(len(widths) - 1, -1, -1):\n if collapsible[i]:\n total_width -= widths[i]\n excess_width -= widths[i]\n widths[i] = 0\n if excess_width <= 0:\n break\n return super()._collapse_widths(widths, wrapable, max_width)\n", "path": "dvc/utils/table.py"}]}
1877
134
gh_patches_debug_10799
rasdani/github-patches
git_diff
optuna__optuna-1680
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Use function annotation syntax for Type Hints. After dropping Python 2.7 support at #710, we can define type hints with function annotation syntax. ~~Do you have a plan to update the coding style guideline?~~ https://github.com/optuna/optuna/wiki/Coding-Style-Conventions ## Progress - [x] `optuna/integration/sklearn.py` (#1735) - [x] `optuna/study.py` - assigned to harpy ## Note to the questioner We still cannot use variable annotation syntax introduced by [PEP 526](https://www.python.org/dev/peps/pep-0526/) because we supports Python 3.5. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `optuna/pruners/_nop.py` Content: ``` 1 from optuna.pruners import BasePruner 2 from optuna import type_checking 3 4 if type_checking.TYPE_CHECKING: 5 from optuna.study import Study # NOQA 6 from optuna.trial import FrozenTrial # NOQA 7 8 9 class NopPruner(BasePruner): 10 """Pruner which never prunes trials. 11 12 Example: 13 14 .. testcode:: 15 16 import numpy as np 17 from sklearn.datasets import load_iris 18 from sklearn.linear_model import SGDClassifier 19 from sklearn.model_selection import train_test_split 20 21 import optuna 22 23 X, y = load_iris(return_X_y=True) 24 X_train, X_valid, y_train, y_valid = train_test_split(X, y) 25 classes = np.unique(y) 26 27 def objective(trial): 28 alpha = trial.suggest_uniform('alpha', 0.0, 1.0) 29 clf = SGDClassifier(alpha=alpha) 30 n_train_iter = 100 31 32 for step in range(n_train_iter): 33 clf.partial_fit(X_train, y_train, classes=classes) 34 35 intermediate_value = clf.score(X_valid, y_valid) 36 trial.report(intermediate_value, step) 37 38 if trial.should_prune(): 39 assert False, "should_prune() should always return False with this pruner." 40 raise optuna.TrialPruned() 41 42 return clf.score(X_valid, y_valid) 43 44 study = optuna.create_study(direction='maximize', 45 pruner=optuna.pruners.NopPruner()) 46 study.optimize(objective, n_trials=20) 47 """ 48 49 def prune(self, study, trial): 50 # type: (Study, FrozenTrial) -> bool 51 52 return False 53 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/optuna/pruners/_nop.py b/optuna/pruners/_nop.py --- a/optuna/pruners/_nop.py +++ b/optuna/pruners/_nop.py @@ -1,9 +1,5 @@ +import optuna from optuna.pruners import BasePruner -from optuna import type_checking - -if type_checking.TYPE_CHECKING: - from optuna.study import Study # NOQA - from optuna.trial import FrozenTrial # NOQA class NopPruner(BasePruner): @@ -46,7 +42,6 @@ study.optimize(objective, n_trials=20) """ - def prune(self, study, trial): - # type: (Study, FrozenTrial) -> bool + def prune(self, study: "optuna.study.Study", trial: "optuna.trial.FrozenTrial") -> bool: return False
{"golden_diff": "diff --git a/optuna/pruners/_nop.py b/optuna/pruners/_nop.py\n--- a/optuna/pruners/_nop.py\n+++ b/optuna/pruners/_nop.py\n@@ -1,9 +1,5 @@\n+import optuna\n from optuna.pruners import BasePruner\n-from optuna import type_checking\n-\n-if type_checking.TYPE_CHECKING:\n- from optuna.study import Study # NOQA\n- from optuna.trial import FrozenTrial # NOQA\n \n \n class NopPruner(BasePruner):\n@@ -46,7 +42,6 @@\n study.optimize(objective, n_trials=20)\n \"\"\"\n \n- def prune(self, study, trial):\n- # type: (Study, FrozenTrial) -> bool\n+ def prune(self, study: \"optuna.study.Study\", trial: \"optuna.trial.FrozenTrial\") -> bool:\n \n return False\n", "issue": "Use function annotation syntax for Type Hints.\nAfter dropping Python 2.7 support at #710, we can define type hints with function annotation syntax. \r\n~~Do you have a plan to update the coding style guideline?~~\r\nhttps://github.com/optuna/optuna/wiki/Coding-Style-Conventions\r\n\r\n## Progress\r\n\r\n- [x] `optuna/integration/sklearn.py` (#1735)\r\n- [x] `optuna/study.py` - assigned to harpy\r\n\r\n## Note to the questioner\r\n\r\nWe still cannot use variable annotation syntax introduced by [PEP 526](https://www.python.org/dev/peps/pep-0526/) because we supports Python 3.5.\n", "before_files": [{"content": "from optuna.pruners import BasePruner\nfrom optuna import type_checking\n\nif type_checking.TYPE_CHECKING:\n from optuna.study import Study # NOQA\n from optuna.trial import FrozenTrial # NOQA\n\n\nclass NopPruner(BasePruner):\n \"\"\"Pruner which never prunes trials.\n\n Example:\n\n .. testcode::\n\n import numpy as np\n from sklearn.datasets import load_iris\n from sklearn.linear_model import SGDClassifier\n from sklearn.model_selection import train_test_split\n\n import optuna\n\n X, y = load_iris(return_X_y=True)\n X_train, X_valid, y_train, y_valid = train_test_split(X, y)\n classes = np.unique(y)\n\n def objective(trial):\n alpha = trial.suggest_uniform('alpha', 0.0, 1.0)\n clf = SGDClassifier(alpha=alpha)\n n_train_iter = 100\n\n for step in range(n_train_iter):\n clf.partial_fit(X_train, y_train, classes=classes)\n\n intermediate_value = clf.score(X_valid, y_valid)\n trial.report(intermediate_value, step)\n\n if trial.should_prune():\n assert False, \"should_prune() should always return False with this pruner.\"\n raise optuna.TrialPruned()\n\n return clf.score(X_valid, y_valid)\n\n study = optuna.create_study(direction='maximize',\n pruner=optuna.pruners.NopPruner())\n study.optimize(objective, n_trials=20)\n \"\"\"\n\n def prune(self, study, trial):\n # type: (Study, FrozenTrial) -> bool\n\n return False\n", "path": "optuna/pruners/_nop.py"}], "after_files": [{"content": "import optuna\nfrom optuna.pruners import BasePruner\n\n\nclass NopPruner(BasePruner):\n \"\"\"Pruner which never prunes trials.\n\n Example:\n\n .. 
testcode::\n\n import numpy as np\n from sklearn.datasets import load_iris\n from sklearn.linear_model import SGDClassifier\n from sklearn.model_selection import train_test_split\n\n import optuna\n\n X, y = load_iris(return_X_y=True)\n X_train, X_valid, y_train, y_valid = train_test_split(X, y)\n classes = np.unique(y)\n\n def objective(trial):\n alpha = trial.suggest_uniform('alpha', 0.0, 1.0)\n clf = SGDClassifier(alpha=alpha)\n n_train_iter = 100\n\n for step in range(n_train_iter):\n clf.partial_fit(X_train, y_train, classes=classes)\n\n intermediate_value = clf.score(X_valid, y_valid)\n trial.report(intermediate_value, step)\n\n if trial.should_prune():\n assert False, \"should_prune() should always return False with this pruner.\"\n raise optuna.TrialPruned()\n\n return clf.score(X_valid, y_valid)\n\n study = optuna.create_study(direction='maximize',\n pruner=optuna.pruners.NopPruner())\n study.optimize(objective, n_trials=20)\n \"\"\"\n\n def prune(self, study: \"optuna.study.Study\", trial: \"optuna.trial.FrozenTrial\") -> bool:\n\n return False\n", "path": "optuna/pruners/_nop.py"}]}
889
210
gh_patches_debug_14763
rasdani/github-patches
git_diff
pantsbuild__pants-20300
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `terraform_deployment` cannot load vars files if the root `terraform_module` is not in the same dir **Describe the bug** root/BUILD: ``` terraform_deployment(root_module="//mod0:mod0", var_files=["a.tfvars"]) ``` root/a.tfvars: ``` var0 = "hihello" ``` mod/BUILD: ``` terraform_module() ``` mod/main.tf: ``` resource "null_resource" "dep" {} ``` running `pants experimental-deploy //root:root` yields: ``` Engine traceback: in select .. in pants.core.goals.deploy.run_deploy `experimental-deploy` goal Traceback (most recent call last): File "/home/lilatomic/vnd/pants/src/python/pants/core/goals/deploy.py", line 176, in run_deploy deploy_processes = await MultiGet( File "/home/lilatomic/vnd/pants/src/python/pants/engine/internals/selectors.py", line 374, in MultiGet return await _MultiGet(tuple(__arg0)) File "/home/lilatomic/vnd/pants/src/python/pants/engine/internals/selectors.py", line 172, in __await__ result = yield self.gets ValueError: 'root/a.tfvars' is not in the subpath of 'mod0' OR one path is relative and the other is absolute. ``` **Pants version** 2.18+ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/python/pants/backend/terraform/utils.py` Content: ``` 1 # Copyright 2023 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 import shlex 4 from pathlib import PurePath 5 6 7 def terraform_arg(name: str, value: str) -> str: 8 """Format a Terraform arg.""" 9 return f"{name}={shlex.quote(value)}" 10 11 12 def terraform_relpath(chdir: str, target: str) -> str: 13 """Compute the relative path of a target file to the Terraform deployment root.""" 14 return PurePath(target).relative_to(chdir).as_posix() 15 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/python/pants/backend/terraform/utils.py b/src/python/pants/backend/terraform/utils.py --- a/src/python/pants/backend/terraform/utils.py +++ b/src/python/pants/backend/terraform/utils.py @@ -1,7 +1,7 @@ # Copyright 2023 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). +import os.path import shlex -from pathlib import PurePath def terraform_arg(name: str, value: str) -> str: @@ -11,4 +11,4 @@ def terraform_relpath(chdir: str, target: str) -> str: """Compute the relative path of a target file to the Terraform deployment root.""" - return PurePath(target).relative_to(chdir).as_posix() + return os.path.relpath(target, start=chdir)
{"golden_diff": "diff --git a/src/python/pants/backend/terraform/utils.py b/src/python/pants/backend/terraform/utils.py\n--- a/src/python/pants/backend/terraform/utils.py\n+++ b/src/python/pants/backend/terraform/utils.py\n@@ -1,7 +1,7 @@\n # Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n+import os.path\n import shlex\n-from pathlib import PurePath\n \n \n def terraform_arg(name: str, value: str) -> str:\n@@ -11,4 +11,4 @@\n \n def terraform_relpath(chdir: str, target: str) -> str:\n \"\"\"Compute the relative path of a target file to the Terraform deployment root.\"\"\"\n- return PurePath(target).relative_to(chdir).as_posix()\n+ return os.path.relpath(target, start=chdir)\n", "issue": "`terraform_deployment` cannot load vars files if the root `terraform_module` is not in the same dir\n**Describe the bug**\r\n\r\nroot/BUILD:\r\n```\r\nterraform_deployment(root_module=\"//mod0:mod0\", var_files=[\"a.tfvars\"])\r\n```\r\nroot/a.tfvars:\r\n```\r\nvar0 = \"hihello\"\r\n```\r\nmod/BUILD:\r\n```\r\nterraform_module()\r\n```\r\nmod/main.tf:\r\n```\r\nresource \"null_resource\" \"dep\" {}\r\n```\r\n\r\nrunning `pants experimental-deploy //root:root` yields:\r\n```\r\nEngine traceback:\r\n in select\r\n ..\r\n in pants.core.goals.deploy.run_deploy\r\n `experimental-deploy` goal\r\n\r\nTraceback (most recent call last):\r\n File \"/home/lilatomic/vnd/pants/src/python/pants/core/goals/deploy.py\", line 176, in run_deploy\r\n deploy_processes = await MultiGet(\r\n File \"/home/lilatomic/vnd/pants/src/python/pants/engine/internals/selectors.py\", line 374, in MultiGet\r\n return await _MultiGet(tuple(__arg0))\r\n File \"/home/lilatomic/vnd/pants/src/python/pants/engine/internals/selectors.py\", line 172, in __await__\r\n result = yield self.gets\r\nValueError: 'root/a.tfvars' is not in the subpath of 'mod0' OR one path is relative and the other is absolute.\r\n```\r\n\r\n**Pants version**\r\n2.18+\r\n\n", "before_files": [{"content": "# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nimport shlex\nfrom pathlib import PurePath\n\n\ndef terraform_arg(name: str, value: str) -> str:\n \"\"\"Format a Terraform arg.\"\"\"\n return f\"{name}={shlex.quote(value)}\"\n\n\ndef terraform_relpath(chdir: str, target: str) -> str:\n \"\"\"Compute the relative path of a target file to the Terraform deployment root.\"\"\"\n return PurePath(target).relative_to(chdir).as_posix()\n", "path": "src/python/pants/backend/terraform/utils.py"}], "after_files": [{"content": "# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nimport os.path\nimport shlex\n\n\ndef terraform_arg(name: str, value: str) -> str:\n \"\"\"Format a Terraform arg.\"\"\"\n return f\"{name}={shlex.quote(value)}\"\n\n\ndef terraform_relpath(chdir: str, target: str) -> str:\n \"\"\"Compute the relative path of a target file to the Terraform deployment root.\"\"\"\n return os.path.relpath(target, start=chdir)\n", "path": "src/python/pants/backend/terraform/utils.py"}]}
729
197
gh_patches_debug_35794
rasdani/github-patches
git_diff
microsoft__hi-ml-430
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Clean up console logging for runner Starting the runner prints out "sys.path at container level" twice. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `hi-ml-histopathology/src/histopathology/configs/SSL/CRCK_SimCLRContainer.py` Content: ``` 1 # ------------------------------------------------------------------------------------------ 2 # Copyright (c) Microsoft Corporation. All rights reserved. 3 # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information. 4 # ------------------------------------------------------------------------------------------ 5 from enum import Enum 6 from pathlib import Path 7 from typing import Any 8 import sys 9 10 from SSL.lightning_containers.ssl_container import EncoderName, SSLContainer, SSLDatasetName 11 from SSL.utils import SSLTrainingType 12 from histopathology.datasets.tcga_crck_tiles_dataset import TcgaCrck_TilesDatasetWithReturnIndex 13 from histopathology.configs.SSL.HistoSimCLRContainer import HistoSSLContainer 14 15 current_file = Path(__file__) 16 print(f"Running container from {current_file}") 17 print(f"Sys path container level {sys.path}") 18 19 20 class SSLDatasetNameHiml(SSLDatasetName, Enum): # type: ignore 21 TCGA_CRCK = "CRCKTilesDataset" 22 23 24 class CRCK_SimCLR(HistoSSLContainer): 25 """ 26 Config to train SSL model on CRCK tiles dataset. 27 Augmentation can be configured by using a configuration yml file or by specifying the set of transformations 28 in the _get_transforms method. 29 It has been tested locally and on AML on the full training dataset (93408 tiles). 30 """ 31 SSLContainer._SSLDataClassMappings.update({SSLDatasetNameHiml.TCGA_CRCK.value: 32 TcgaCrck_TilesDatasetWithReturnIndex}) 33 34 def __init__(self, **kwargs: Any) -> None: 35 # if not running in Azure ML, you may want to override certain properties on the command line, such as: 36 # --is_debug_model = True 37 # --num_workers = 0 38 # --max_epochs = 2 39 40 super().__init__(ssl_training_dataset_name=SSLDatasetNameHiml.TCGA_CRCK, 41 linear_head_dataset_name=SSLDatasetNameHiml.TCGA_CRCK, 42 azure_datasets=["TCGA-CRCk"], 43 random_seed=1, 44 num_workers=8, 45 is_debug_model=False, 46 model_checkpoint_save_interval=50, 47 model_checkpoints_save_last_k=3, 48 model_monitor_metric='ssl_online_evaluator/val/AreaUnderRocCurve', 49 model_monitor_mode='max', 50 max_epochs=50, 51 ssl_training_batch_size=48, # GPU memory is at 70% with batch_size=32, 2GPUs 52 ssl_encoder=EncoderName.resnet50, 53 ssl_training_type=SSLTrainingType.SimCLR, 54 use_balanced_binary_loss_for_linear_head=True, 55 ssl_augmentation_config=None, # Change to path_augmentation to use the config 56 linear_head_augmentation_config=None, # Change to path_augmentation to use the config 57 drop_last=False, 58 **kwargs) 59 ``` Path: `hi-ml-histopathology/src/histopathology/configs/SSL/PANDA_SimCLRContainer.py` Content: ``` 1 # ------------------------------------------------------------------------------------------ 2 # Copyright (c) Microsoft Corporation. All rights reserved. 3 # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information. 
4 # ------------------------------------------------------------------------------------------ 5 from enum import Enum 6 from pathlib import Path 7 from typing import Any 8 import sys 9 10 from SSL.lightning_containers.ssl_container import EncoderName, SSLContainer, SSLDatasetName 11 from SSL.utils import SSLTrainingType 12 from health_azure.utils import is_running_in_azure_ml 13 from histopathology.datasets.panda_tiles_dataset import PandaTilesDatasetWithReturnIndex 14 from histopathology.configs.SSL.HistoSimCLRContainer import HistoSSLContainer 15 from histopathology.datasets.default_paths import PANDA_TILES_DATASET_ID 16 17 18 current_file = Path(__file__) 19 print(f"Running container from {current_file}") 20 print(f"Sys path container level {sys.path}") 21 22 23 class SSLDatasetNameHiml(SSLDatasetName, Enum): # type: ignore 24 PANDA = "PandaTilesDataset" 25 26 27 class PANDA_SimCLR(HistoSSLContainer): 28 """ 29 Config to train SSL model on Panda tiles dataset. 30 Augmentation can be configured by using a configuration yml file or by specifying the set of transformations 31 in the _get_transforms method. 32 It has been tested on a toy local dataset (2 slides) and on AML on (~25 slides). 33 """ 34 SSLContainer._SSLDataClassMappings.update({SSLDatasetNameHiml.PANDA.value: PandaTilesDatasetWithReturnIndex}) 35 36 def __init__(self, **kwargs: Any) -> None: 37 super().__init__(ssl_training_dataset_name=SSLDatasetNameHiml.PANDA, 38 linear_head_dataset_name=SSLDatasetNameHiml.PANDA, 39 azure_datasets=[PANDA_TILES_DATASET_ID], 40 random_seed=1, 41 num_workers=5, 42 is_debug_model=False, 43 model_checkpoint_save_interval=50, 44 model_checkpoints_save_last_k=3, 45 model_monitor_metric='ssl_online_evaluator/val/AccuracyAtThreshold05', 46 model_monitor_mode='max', 47 max_epochs=200, 48 ssl_training_batch_size=128, 49 ssl_encoder=EncoderName.resnet50, 50 ssl_training_type=SSLTrainingType.SimCLR, 51 use_balanced_binary_loss_for_linear_head=True, 52 ssl_augmentation_config=None, # Change to path_augmentation to use the config 53 linear_head_augmentation_config=None, # Change to path_augmentation to use the config 54 drop_last=False, 55 **kwargs) 56 self.pl_check_val_every_n_epoch = 10 57 PandaTilesDatasetWithReturnIndex.occupancy_threshold = 0 58 PandaTilesDatasetWithReturnIndex.random_subset_fraction = 1 59 if not is_running_in_azure_ml(): 60 self.is_debug_model = True 61 self.num_workers = 0 62 self.max_epochs = 2 63 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/hi-ml-histopathology/src/histopathology/configs/SSL/CRCK_SimCLRContainer.py b/hi-ml-histopathology/src/histopathology/configs/SSL/CRCK_SimCLRContainer.py --- a/hi-ml-histopathology/src/histopathology/configs/SSL/CRCK_SimCLRContainer.py +++ b/hi-ml-histopathology/src/histopathology/configs/SSL/CRCK_SimCLRContainer.py @@ -3,19 +3,13 @@ # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information. # ------------------------------------------------------------------------------------------ from enum import Enum -from pathlib import Path from typing import Any -import sys from SSL.lightning_containers.ssl_container import EncoderName, SSLContainer, SSLDatasetName from SSL.utils import SSLTrainingType from histopathology.datasets.tcga_crck_tiles_dataset import TcgaCrck_TilesDatasetWithReturnIndex from histopathology.configs.SSL.HistoSimCLRContainer import HistoSSLContainer -current_file = Path(__file__) -print(f"Running container from {current_file}") -print(f"Sys path container level {sys.path}") - class SSLDatasetNameHiml(SSLDatasetName, Enum): # type: ignore TCGA_CRCK = "CRCKTilesDataset" diff --git a/hi-ml-histopathology/src/histopathology/configs/SSL/PANDA_SimCLRContainer.py b/hi-ml-histopathology/src/histopathology/configs/SSL/PANDA_SimCLRContainer.py --- a/hi-ml-histopathology/src/histopathology/configs/SSL/PANDA_SimCLRContainer.py +++ b/hi-ml-histopathology/src/histopathology/configs/SSL/PANDA_SimCLRContainer.py @@ -3,9 +3,7 @@ # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information. # ------------------------------------------------------------------------------------------ from enum import Enum -from pathlib import Path from typing import Any -import sys from SSL.lightning_containers.ssl_container import EncoderName, SSLContainer, SSLDatasetName from SSL.utils import SSLTrainingType @@ -15,11 +13,6 @@ from histopathology.datasets.default_paths import PANDA_TILES_DATASET_ID -current_file = Path(__file__) -print(f"Running container from {current_file}") -print(f"Sys path container level {sys.path}") - - class SSLDatasetNameHiml(SSLDatasetName, Enum): # type: ignore PANDA = "PandaTilesDataset"
{"golden_diff": "diff --git a/hi-ml-histopathology/src/histopathology/configs/SSL/CRCK_SimCLRContainer.py b/hi-ml-histopathology/src/histopathology/configs/SSL/CRCK_SimCLRContainer.py\n--- a/hi-ml-histopathology/src/histopathology/configs/SSL/CRCK_SimCLRContainer.py\n+++ b/hi-ml-histopathology/src/histopathology/configs/SSL/CRCK_SimCLRContainer.py\n@@ -3,19 +3,13 @@\n # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n # ------------------------------------------------------------------------------------------\n from enum import Enum\n-from pathlib import Path\n from typing import Any\n-import sys\n \n from SSL.lightning_containers.ssl_container import EncoderName, SSLContainer, SSLDatasetName\n from SSL.utils import SSLTrainingType\n from histopathology.datasets.tcga_crck_tiles_dataset import TcgaCrck_TilesDatasetWithReturnIndex\n from histopathology.configs.SSL.HistoSimCLRContainer import HistoSSLContainer\n \n-current_file = Path(__file__)\n-print(f\"Running container from {current_file}\")\n-print(f\"Sys path container level {sys.path}\")\n-\n \n class SSLDatasetNameHiml(SSLDatasetName, Enum): # type: ignore\n TCGA_CRCK = \"CRCKTilesDataset\"\ndiff --git a/hi-ml-histopathology/src/histopathology/configs/SSL/PANDA_SimCLRContainer.py b/hi-ml-histopathology/src/histopathology/configs/SSL/PANDA_SimCLRContainer.py\n--- a/hi-ml-histopathology/src/histopathology/configs/SSL/PANDA_SimCLRContainer.py\n+++ b/hi-ml-histopathology/src/histopathology/configs/SSL/PANDA_SimCLRContainer.py\n@@ -3,9 +3,7 @@\n # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n # ------------------------------------------------------------------------------------------\n from enum import Enum\n-from pathlib import Path\n from typing import Any\n-import sys\n \n from SSL.lightning_containers.ssl_container import EncoderName, SSLContainer, SSLDatasetName\n from SSL.utils import SSLTrainingType\n@@ -15,11 +13,6 @@\n from histopathology.datasets.default_paths import PANDA_TILES_DATASET_ID\n \n \n-current_file = Path(__file__)\n-print(f\"Running container from {current_file}\")\n-print(f\"Sys path container level {sys.path}\")\n-\n-\n class SSLDatasetNameHiml(SSLDatasetName, Enum): # type: ignore\n PANDA = \"PandaTilesDataset\"\n", "issue": "Clean up console logging for runner\nStarting the runner prints out \"sys.path at container level\" twice.\n", "before_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). 
See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Any\nimport sys\n\nfrom SSL.lightning_containers.ssl_container import EncoderName, SSLContainer, SSLDatasetName\nfrom SSL.utils import SSLTrainingType\nfrom histopathology.datasets.tcga_crck_tiles_dataset import TcgaCrck_TilesDatasetWithReturnIndex\nfrom histopathology.configs.SSL.HistoSimCLRContainer import HistoSSLContainer\n\ncurrent_file = Path(__file__)\nprint(f\"Running container from {current_file}\")\nprint(f\"Sys path container level {sys.path}\")\n\n\nclass SSLDatasetNameHiml(SSLDatasetName, Enum): # type: ignore\n TCGA_CRCK = \"CRCKTilesDataset\"\n\n\nclass CRCK_SimCLR(HistoSSLContainer):\n \"\"\"\n Config to train SSL model on CRCK tiles dataset.\n Augmentation can be configured by using a configuration yml file or by specifying the set of transformations\n in the _get_transforms method.\n It has been tested locally and on AML on the full training dataset (93408 tiles).\n \"\"\"\n SSLContainer._SSLDataClassMappings.update({SSLDatasetNameHiml.TCGA_CRCK.value:\n TcgaCrck_TilesDatasetWithReturnIndex})\n\n def __init__(self, **kwargs: Any) -> None:\n # if not running in Azure ML, you may want to override certain properties on the command line, such as:\n # --is_debug_model = True\n # --num_workers = 0\n # --max_epochs = 2\n\n super().__init__(ssl_training_dataset_name=SSLDatasetNameHiml.TCGA_CRCK,\n linear_head_dataset_name=SSLDatasetNameHiml.TCGA_CRCK,\n azure_datasets=[\"TCGA-CRCk\"],\n random_seed=1,\n num_workers=8,\n is_debug_model=False,\n model_checkpoint_save_interval=50,\n model_checkpoints_save_last_k=3,\n model_monitor_metric='ssl_online_evaluator/val/AreaUnderRocCurve',\n model_monitor_mode='max',\n max_epochs=50,\n ssl_training_batch_size=48, # GPU memory is at 70% with batch_size=32, 2GPUs\n ssl_encoder=EncoderName.resnet50,\n ssl_training_type=SSLTrainingType.SimCLR,\n use_balanced_binary_loss_for_linear_head=True,\n ssl_augmentation_config=None, # Change to path_augmentation to use the config\n linear_head_augmentation_config=None, # Change to path_augmentation to use the config\n drop_last=False,\n **kwargs)\n", "path": "hi-ml-histopathology/src/histopathology/configs/SSL/CRCK_SimCLRContainer.py"}, {"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). 
See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Any\nimport sys\n\nfrom SSL.lightning_containers.ssl_container import EncoderName, SSLContainer, SSLDatasetName\nfrom SSL.utils import SSLTrainingType\nfrom health_azure.utils import is_running_in_azure_ml\nfrom histopathology.datasets.panda_tiles_dataset import PandaTilesDatasetWithReturnIndex\nfrom histopathology.configs.SSL.HistoSimCLRContainer import HistoSSLContainer\nfrom histopathology.datasets.default_paths import PANDA_TILES_DATASET_ID\n\n\ncurrent_file = Path(__file__)\nprint(f\"Running container from {current_file}\")\nprint(f\"Sys path container level {sys.path}\")\n\n\nclass SSLDatasetNameHiml(SSLDatasetName, Enum): # type: ignore\n PANDA = \"PandaTilesDataset\"\n\n\nclass PANDA_SimCLR(HistoSSLContainer):\n \"\"\"\n Config to train SSL model on Panda tiles dataset.\n Augmentation can be configured by using a configuration yml file or by specifying the set of transformations\n in the _get_transforms method.\n It has been tested on a toy local dataset (2 slides) and on AML on (~25 slides).\n \"\"\"\n SSLContainer._SSLDataClassMappings.update({SSLDatasetNameHiml.PANDA.value: PandaTilesDatasetWithReturnIndex})\n\n def __init__(self, **kwargs: Any) -> None:\n super().__init__(ssl_training_dataset_name=SSLDatasetNameHiml.PANDA,\n linear_head_dataset_name=SSLDatasetNameHiml.PANDA,\n azure_datasets=[PANDA_TILES_DATASET_ID],\n random_seed=1,\n num_workers=5,\n is_debug_model=False,\n model_checkpoint_save_interval=50,\n model_checkpoints_save_last_k=3,\n model_monitor_metric='ssl_online_evaluator/val/AccuracyAtThreshold05',\n model_monitor_mode='max',\n max_epochs=200,\n ssl_training_batch_size=128,\n ssl_encoder=EncoderName.resnet50,\n ssl_training_type=SSLTrainingType.SimCLR,\n use_balanced_binary_loss_for_linear_head=True,\n ssl_augmentation_config=None, # Change to path_augmentation to use the config\n linear_head_augmentation_config=None, # Change to path_augmentation to use the config\n drop_last=False,\n **kwargs)\n self.pl_check_val_every_n_epoch = 10\n PandaTilesDatasetWithReturnIndex.occupancy_threshold = 0\n PandaTilesDatasetWithReturnIndex.random_subset_fraction = 1\n if not is_running_in_azure_ml():\n self.is_debug_model = True\n self.num_workers = 0\n self.max_epochs = 2\n", "path": "hi-ml-histopathology/src/histopathology/configs/SSL/PANDA_SimCLRContainer.py"}], "after_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). 
See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\nfrom enum import Enum\nfrom typing import Any\n\nfrom SSL.lightning_containers.ssl_container import EncoderName, SSLContainer, SSLDatasetName\nfrom SSL.utils import SSLTrainingType\nfrom histopathology.datasets.tcga_crck_tiles_dataset import TcgaCrck_TilesDatasetWithReturnIndex\nfrom histopathology.configs.SSL.HistoSimCLRContainer import HistoSSLContainer\n\n\nclass SSLDatasetNameHiml(SSLDatasetName, Enum): # type: ignore\n TCGA_CRCK = \"CRCKTilesDataset\"\n\n\nclass CRCK_SimCLR(HistoSSLContainer):\n \"\"\"\n Config to train SSL model on CRCK tiles dataset.\n Augmentation can be configured by using a configuration yml file or by specifying the set of transformations\n in the _get_transforms method.\n It has been tested locally and on AML on the full training dataset (93408 tiles).\n \"\"\"\n SSLContainer._SSLDataClassMappings.update({SSLDatasetNameHiml.TCGA_CRCK.value:\n TcgaCrck_TilesDatasetWithReturnIndex})\n\n def __init__(self, **kwargs: Any) -> None:\n # if not running in Azure ML, you may want to override certain properties on the command line, such as:\n # --is_debug_model = True\n # --num_workers = 0\n # --max_epochs = 2\n\n super().__init__(ssl_training_dataset_name=SSLDatasetNameHiml.TCGA_CRCK,\n linear_head_dataset_name=SSLDatasetNameHiml.TCGA_CRCK,\n azure_datasets=[\"TCGA-CRCk\"],\n random_seed=1,\n num_workers=8,\n is_debug_model=False,\n model_checkpoint_save_interval=50,\n model_checkpoints_save_last_k=3,\n model_monitor_metric='ssl_online_evaluator/val/AreaUnderRocCurve',\n model_monitor_mode='max',\n max_epochs=50,\n ssl_training_batch_size=48, # GPU memory is at 70% with batch_size=32, 2GPUs\n ssl_encoder=EncoderName.resnet50,\n ssl_training_type=SSLTrainingType.SimCLR,\n use_balanced_binary_loss_for_linear_head=True,\n ssl_augmentation_config=None, # Change to path_augmentation to use the config\n linear_head_augmentation_config=None, # Change to path_augmentation to use the config\n drop_last=False,\n **kwargs)\n", "path": "hi-ml-histopathology/src/histopathology/configs/SSL/CRCK_SimCLRContainer.py"}, {"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). 
See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\nfrom enum import Enum\nfrom typing import Any\n\nfrom SSL.lightning_containers.ssl_container import EncoderName, SSLContainer, SSLDatasetName\nfrom SSL.utils import SSLTrainingType\nfrom health_azure.utils import is_running_in_azure_ml\nfrom histopathology.datasets.panda_tiles_dataset import PandaTilesDatasetWithReturnIndex\nfrom histopathology.configs.SSL.HistoSimCLRContainer import HistoSSLContainer\nfrom histopathology.datasets.default_paths import PANDA_TILES_DATASET_ID\n\n\nclass SSLDatasetNameHiml(SSLDatasetName, Enum): # type: ignore\n PANDA = \"PandaTilesDataset\"\n\n\nclass PANDA_SimCLR(HistoSSLContainer):\n \"\"\"\n Config to train SSL model on Panda tiles dataset.\n Augmentation can be configured by using a configuration yml file or by specifying the set of transformations\n in the _get_transforms method.\n It has been tested on a toy local dataset (2 slides) and on AML on (~25 slides).\n \"\"\"\n SSLContainer._SSLDataClassMappings.update({SSLDatasetNameHiml.PANDA.value: PandaTilesDatasetWithReturnIndex})\n\n def __init__(self, **kwargs: Any) -> None:\n super().__init__(ssl_training_dataset_name=SSLDatasetNameHiml.PANDA,\n linear_head_dataset_name=SSLDatasetNameHiml.PANDA,\n azure_datasets=[PANDA_TILES_DATASET_ID],\n random_seed=1,\n num_workers=5,\n is_debug_model=False,\n model_checkpoint_save_interval=50,\n model_checkpoints_save_last_k=3,\n model_monitor_metric='ssl_online_evaluator/val/AccuracyAtThreshold05',\n model_monitor_mode='max',\n max_epochs=200,\n ssl_training_batch_size=128,\n ssl_encoder=EncoderName.resnet50,\n ssl_training_type=SSLTrainingType.SimCLR,\n use_balanced_binary_loss_for_linear_head=True,\n ssl_augmentation_config=None, # Change to path_augmentation to use the config\n linear_head_augmentation_config=None, # Change to path_augmentation to use the config\n drop_last=False,\n **kwargs)\n self.pl_check_val_every_n_epoch = 10\n PandaTilesDatasetWithReturnIndex.occupancy_threshold = 0\n PandaTilesDatasetWithReturnIndex.random_subset_fraction = 1\n if not is_running_in_azure_ml():\n self.is_debug_model = True\n self.num_workers = 0\n self.max_epochs = 2\n", "path": "hi-ml-histopathology/src/histopathology/configs/SSL/PANDA_SimCLRContainer.py"}]}
1808
582
gh_patches_debug_30438
rasdani/github-patches
git_diff
bridgecrewio__checkov-5301
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- TypeError in SecretManagerSecret90days **Describe the issue** While running a scan on TF code, I'm getting a TypeError **Examples** The relevant TF code is: ``` resource "aws_secretsmanager_secret_rotation" "rds_password_rotation" { secret_id = aws_secretsmanager_secret.credentials.id rotation_lambda_arn = "arn:..." rotation_rules { automatically_after_days = var.db_password_rotation_days } } variable "db_password_rotation_days" { description = "Number of days in which the RDS password will be rotated" type = number } ``` **Exception Trace** ``` Failed to run check CKV_AWS_304 on rds.tf:aws_secretsmanager_secret_rotation.rds_password_rotation Traceback (most recent call last): File "\venv\Lib\site-packages\checkov\common\checks\base_check.py", line 73, in run check_result["result"] = self.scan_entity_conf(entity_configuration, entity_type) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "\venv\Lib\site-packages\checkov\terraform\checks\resource\base_resource_check.py", line 43, in scan_entity_conf return self.scan_resource_conf(conf) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "\venv\Lib\site-packages\checkov\terraform\checks\resource\aws\SecretManagerSecret90days.py", line 20, in scan_resource_conf if days < 90: ^^^^^^^^^ TypeError: '<' not supported between instances of 'str' and 'int' ``` **Desktop (please complete the following information):** - OS: Windows 10 for Workstation - Checkov Version 2.3.301 **Additional context** I inspected the value of date at the line causing the error and it is the string `var.db_password_rotation_days`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checkov/terraform/checks/resource/aws/SecretManagerSecret90days.py` Content: ``` 1 2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck 3 from checkov.common.models.enums import CheckCategories, CheckResult 4 5 6 class SecretManagerSecret90days(BaseResourceCheck): 7 8 def __init__(self): 9 name = "Ensure Secrets Manager secrets should be rotated within 90 days" 10 id = "CKV_AWS_304" 11 supported_resources = ["aws_secretsmanager_secret_rotation"] 12 categories = [CheckCategories.GENERAL_SECURITY] 13 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 14 15 def scan_resource_conf(self, conf) -> CheckResult: 16 if conf.get("rotation_rules") and isinstance(conf.get("rotation_rules"), list): 17 rule = conf.get("rotation_rules")[0] 18 if rule.get('automatically_after_days') and isinstance(rule.get('automatically_after_days'), list): 19 days = rule.get('automatically_after_days')[0] 20 if days < 90: 21 return CheckResult.PASSED 22 return CheckResult.FAILED 23 24 25 check = SecretManagerSecret90days() 26 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/terraform/checks/resource/aws/SecretManagerSecret90days.py b/checkov/terraform/checks/resource/aws/SecretManagerSecret90days.py --- a/checkov/terraform/checks/resource/aws/SecretManagerSecret90days.py +++ b/checkov/terraform/checks/resource/aws/SecretManagerSecret90days.py @@ -1,23 +1,27 @@ +from __future__ import annotations +from typing import Any + +from checkov.common.util.type_forcers import force_int from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck from checkov.common.models.enums import CheckCategories, CheckResult class SecretManagerSecret90days(BaseResourceCheck): - - def __init__(self): + def __init__(self) -> None: name = "Ensure Secrets Manager secrets should be rotated within 90 days" id = "CKV_AWS_304" - supported_resources = ["aws_secretsmanager_secret_rotation"] - categories = [CheckCategories.GENERAL_SECURITY] + supported_resources = ("aws_secretsmanager_secret_rotation",) + categories = (CheckCategories.GENERAL_SECURITY,) super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) - def scan_resource_conf(self, conf) -> CheckResult: - if conf.get("rotation_rules") and isinstance(conf.get("rotation_rules"), list): - rule = conf.get("rotation_rules")[0] - if rule.get('automatically_after_days') and isinstance(rule.get('automatically_after_days'), list): - days = rule.get('automatically_after_days')[0] - if days < 90: + def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult: + rules = conf.get("rotation_rules") + if rules and isinstance(rules, list): + days = rules[0].get('automatically_after_days') + if days and isinstance(days, list): + days = force_int(days[0]) + if days is not None and days < 90: return CheckResult.PASSED return CheckResult.FAILED
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/SecretManagerSecret90days.py b/checkov/terraform/checks/resource/aws/SecretManagerSecret90days.py\n--- a/checkov/terraform/checks/resource/aws/SecretManagerSecret90days.py\n+++ b/checkov/terraform/checks/resource/aws/SecretManagerSecret90days.py\n@@ -1,23 +1,27 @@\n+from __future__ import annotations\n \n+from typing import Any\n+\n+from checkov.common.util.type_forcers import force_int\n from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n from checkov.common.models.enums import CheckCategories, CheckResult\n \n \n class SecretManagerSecret90days(BaseResourceCheck):\n-\n- def __init__(self):\n+ def __init__(self) -> None:\n name = \"Ensure Secrets Manager secrets should be rotated within 90 days\"\n id = \"CKV_AWS_304\"\n- supported_resources = [\"aws_secretsmanager_secret_rotation\"]\n- categories = [CheckCategories.GENERAL_SECURITY]\n+ supported_resources = (\"aws_secretsmanager_secret_rotation\",)\n+ categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def scan_resource_conf(self, conf) -> CheckResult:\n- if conf.get(\"rotation_rules\") and isinstance(conf.get(\"rotation_rules\"), list):\n- rule = conf.get(\"rotation_rules\")[0]\n- if rule.get('automatically_after_days') and isinstance(rule.get('automatically_after_days'), list):\n- days = rule.get('automatically_after_days')[0]\n- if days < 90:\n+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n+ rules = conf.get(\"rotation_rules\")\n+ if rules and isinstance(rules, list):\n+ days = rules[0].get('automatically_after_days')\n+ if days and isinstance(days, list):\n+ days = force_int(days[0])\n+ if days is not None and days < 90:\n return CheckResult.PASSED\n return CheckResult.FAILED\n", "issue": "TypeError in SecretManagerSecret90days\n**Describe the issue**\r\nWhile running a scan on TF code, I'm getting a TypeError \r\n\r\n\r\n**Examples**\r\nThe relevant TF code is:\r\n```\r\nresource \"aws_secretsmanager_secret_rotation\" \"rds_password_rotation\" {\r\n secret_id = aws_secretsmanager_secret.credentials.id\r\n rotation_lambda_arn = \"arn:...\"\r\n\r\n rotation_rules {\r\n automatically_after_days = var.db_password_rotation_days\r\n }\r\n\r\n}\r\n\r\nvariable \"db_password_rotation_days\" {\r\n description = \"Number of days in which the RDS password will be rotated\"\r\n type = number\r\n}\r\n\r\n```\r\n**Exception Trace**\r\n```\r\nFailed to run check CKV_AWS_304 on rds.tf:aws_secretsmanager_secret_rotation.rds_password_rotation\r\nTraceback (most recent call last):\r\n File \"\\venv\\Lib\\site-packages\\checkov\\common\\checks\\base_check.py\", line 73, in run\r\n check_result[\"result\"] = self.scan_entity_conf(entity_configuration, entity_type)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"\\venv\\Lib\\site-packages\\checkov\\terraform\\checks\\resource\\base_resource_check.py\", line 43, in scan_entity_conf\r\n return self.scan_resource_conf(conf)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"\\venv\\Lib\\site-packages\\checkov\\terraform\\checks\\resource\\aws\\SecretManagerSecret90days.py\", line 20, in scan_resource_conf\r\n if days < 90:\r\n ^^^^^^^^^\r\nTypeError: '<' not supported between instances of 'str' and 'int' \r\n```\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 10 for Workstation\r\n - Checkov Version 
2.3.301\r\n\r\n**Additional context**\r\nI inspected the value of date at the line causing the error and it is the string `var.db_password_rotation_days`. \n", "before_files": [{"content": "\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckCategories, CheckResult\n\n\nclass SecretManagerSecret90days(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure Secrets Manager secrets should be rotated within 90 days\"\n id = \"CKV_AWS_304\"\n supported_resources = [\"aws_secretsmanager_secret_rotation\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf) -> CheckResult:\n if conf.get(\"rotation_rules\") and isinstance(conf.get(\"rotation_rules\"), list):\n rule = conf.get(\"rotation_rules\")[0]\n if rule.get('automatically_after_days') and isinstance(rule.get('automatically_after_days'), list):\n days = rule.get('automatically_after_days')[0]\n if days < 90:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = SecretManagerSecret90days()\n", "path": "checkov/terraform/checks/resource/aws/SecretManagerSecret90days.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.common.util.type_forcers import force_int\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckCategories, CheckResult\n\n\nclass SecretManagerSecret90days(BaseResourceCheck):\n def __init__(self) -> None:\n name = \"Ensure Secrets Manager secrets should be rotated within 90 days\"\n id = \"CKV_AWS_304\"\n supported_resources = (\"aws_secretsmanager_secret_rotation\",)\n categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n rules = conf.get(\"rotation_rules\")\n if rules and isinstance(rules, list):\n days = rules[0].get('automatically_after_days')\n if days and isinstance(days, list):\n days = force_int(days[0])\n if days is not None and days < 90:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = SecretManagerSecret90days()\n", "path": "checkov/terraform/checks/resource/aws/SecretManagerSecret90days.py"}]}
971
476
gh_patches_debug_19980
rasdani/github-patches
git_diff
cfpb__consumerfinance.gov-229
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Centering on mobile `the-bureau` page contains media blocks whose content (image & body) becomes centered at mobile sizes via a `media__centered` class. The `office` index page, however, introduces a new pattern of media blocks whose image centers on mobile while the body remains left-aligned. It seems like it would be more useful to add a general-purpose `.centered-on-mobile` class (or two classes, one for inline & the other for block elements) that could be applied to the appropriate parts of the media object rather than handle this behavior through .`media` modifiers. Thoughts? Preferences? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `_lib/wordpress_office_processor.py` Content: ``` 1 import sys 2 import json 3 import os.path 4 import requests 5 6 def posts_at_url(url): 7 8 current_page = 1 9 max_page = sys.maxint 10 11 while current_page <= max_page: 12 13 url = os.path.expandvars(url) 14 resp = requests.get(url, params={'page':current_page, 'count': '-1'}) 15 results = json.loads(resp.content) 16 current_page += 1 17 max_page = results['pages'] 18 for p in results['posts']: 19 yield p 20 21 def documents(name, url, **kwargs): 22 23 for post in posts_at_url(url): 24 yield process_office(post) 25 26 27 def process_office(item): 28 29 item['_id'] = item['slug'] 30 custom_fields = item['custom_fields'] 31 32 # get intro text & subscribe form data from custom fields 33 for attr in ['intro_text', 'intro_subscribe_form', 'related_contact']: 34 if attr in custom_fields: 35 item[attr] = custom_fields[attr][0] 36 37 # build top story dict 38 top_story = {} 39 for attr in ['top_story_head', 'top_story_desc']: 40 if attr in custom_fields: 41 top_story[attr] = custom_fields[attr][0] 42 43 # convert top story links into a proper list 44 top_story_links = [] 45 for x in xrange(0,5): 46 key = 'top_story_links_%s' % x 47 if key in custom_fields: 48 top_story_links.append(custom_fields[key]) 49 50 if top_story_links: 51 top_story['top_story_links'] = top_story_links 52 53 if top_story: 54 item['top_story'] = top_story 55 56 # create list of office resource dicts 57 item['resources'] = [] 58 for x in xrange(1,4): 59 resource = {} 60 fields = ['head', 'desc', 'icon', 'link_0'] 61 for field in fields: 62 field_name = 'resource%s_%s' % (str(x), field) 63 if field_name in custom_fields and custom_fields[field_name][0] != '': 64 if field == 'link_0': 65 resource['link'] = custom_fields[field_name] 66 else: 67 resource[field] = custom_fields[field_name][0] 68 69 if resource: 70 item['resources'].append(resource) 71 72 return item 73 74 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/_lib/wordpress_office_processor.py b/_lib/wordpress_office_processor.py --- a/_lib/wordpress_office_processor.py +++ b/_lib/wordpress_office_processor.py @@ -55,17 +55,17 @@ # create list of office resource dicts item['resources'] = [] - for x in xrange(1,4): + for x in xrange(0,4): resource = {} - fields = ['head', 'desc', 'icon', 'link_0'] + fields = ['head', 'desc', 'icon', 'link'] for field in fields: - field_name = 'resource%s_%s' % (str(x), field) + field_name = 'resource_%s_%s' % (str(x), field) if field_name in custom_fields and custom_fields[field_name][0] != '': - if field == 'link_0': - resource['link'] = custom_fields[field_name] + if field == 'link': + resource[field] = custom_fields[field_name] else: resource[field] = custom_fields[field_name][0] - + if resource: item['resources'].append(resource)
{"golden_diff": "diff --git a/_lib/wordpress_office_processor.py b/_lib/wordpress_office_processor.py\n--- a/_lib/wordpress_office_processor.py\n+++ b/_lib/wordpress_office_processor.py\n@@ -55,17 +55,17 @@\n \n # create list of office resource dicts\n item['resources'] = []\n- for x in xrange(1,4):\n+ for x in xrange(0,4):\n resource = {}\n- fields = ['head', 'desc', 'icon', 'link_0']\n+ fields = ['head', 'desc', 'icon', 'link']\n for field in fields:\n- field_name = 'resource%s_%s' % (str(x), field)\n+ field_name = 'resource_%s_%s' % (str(x), field)\n if field_name in custom_fields and custom_fields[field_name][0] != '':\n- if field == 'link_0':\n- resource['link'] = custom_fields[field_name]\n+ if field == 'link':\n+ resource[field] = custom_fields[field_name]\n else:\n resource[field] = custom_fields[field_name][0]\n- \n+ \n if resource:\n item['resources'].append(resource)\n", "issue": "Centering on mobile\n`the-bureau` page contains media blocks whose content (image & body) becomes centered at mobile sizes via a `media__centered` class. The `office` index page, however, introduces a new pattern of media blocks whose image centers on mobile while the body remains left-aligned. \n\nIt seems like it would be more useful to add a general-purpose `.centered-on-mobile` class (or two classes, one for inline & the other for block elements) that could be applied to the appropriate parts of the media object rather than handle this behavior through .`media` modifiers. \n\nThoughts? Preferences?\n\n", "before_files": [{"content": "import sys\nimport json\nimport os.path\nimport requests\n\ndef posts_at_url(url):\n \n current_page = 1\n max_page = sys.maxint\n\n while current_page <= max_page:\n\n url = os.path.expandvars(url)\n resp = requests.get(url, params={'page':current_page, 'count': '-1'})\n results = json.loads(resp.content) \n current_page += 1\n max_page = results['pages']\n for p in results['posts']:\n yield p\n \ndef documents(name, url, **kwargs):\n \n for post in posts_at_url(url):\n yield process_office(post)\n\n\ndef process_office(item):\n \n item['_id'] = item['slug']\n custom_fields = item['custom_fields']\n \n # get intro text & subscribe form data from custom fields\n for attr in ['intro_text', 'intro_subscribe_form', 'related_contact']:\n if attr in custom_fields:\n item[attr] = custom_fields[attr][0]\n \n # build top story dict\n top_story = {}\n for attr in ['top_story_head', 'top_story_desc']:\n if attr in custom_fields:\n top_story[attr] = custom_fields[attr][0]\n \n # convert top story links into a proper list\n top_story_links = []\n for x in xrange(0,5):\n key = 'top_story_links_%s' % x\n if key in custom_fields:\n top_story_links.append(custom_fields[key])\n \n if top_story_links: \n top_story['top_story_links'] = top_story_links\n \n if top_story:\n item['top_story'] = top_story\n \n # create list of office resource dicts\n item['resources'] = []\n for x in xrange(1,4):\n resource = {}\n fields = ['head', 'desc', 'icon', 'link_0']\n for field in fields:\n field_name = 'resource%s_%s' % (str(x), field)\n if field_name in custom_fields and custom_fields[field_name][0] != '':\n if field == 'link_0':\n resource['link'] = custom_fields[field_name]\n else:\n resource[field] = custom_fields[field_name][0]\n \n if resource:\n item['resources'].append(resource)\n\n return item\n\n", "path": "_lib/wordpress_office_processor.py"}], "after_files": [{"content": "import sys\nimport json\nimport os.path\nimport requests\n\ndef posts_at_url(url):\n \n current_page = 1\n max_page = 
sys.maxint\n\n while current_page <= max_page:\n\n url = os.path.expandvars(url)\n resp = requests.get(url, params={'page':current_page, 'count': '-1'})\n results = json.loads(resp.content) \n current_page += 1\n max_page = results['pages']\n for p in results['posts']:\n yield p\n \ndef documents(name, url, **kwargs):\n \n for post in posts_at_url(url):\n yield process_office(post)\n\n\ndef process_office(item):\n \n item['_id'] = item['slug']\n custom_fields = item['custom_fields']\n \n # get intro text & subscribe form data from custom fields\n for attr in ['intro_text', 'intro_subscribe_form', 'related_contact']:\n if attr in custom_fields:\n item[attr] = custom_fields[attr][0]\n \n # build top story dict\n top_story = {}\n for attr in ['top_story_head', 'top_story_desc']:\n if attr in custom_fields:\n top_story[attr] = custom_fields[attr][0]\n \n # convert top story links into a proper list\n top_story_links = []\n for x in xrange(0,5):\n key = 'top_story_links_%s' % x\n if key in custom_fields:\n top_story_links.append(custom_fields[key])\n \n if top_story_links: \n top_story['top_story_links'] = top_story_links\n \n if top_story:\n item['top_story'] = top_story\n \n # create list of office resource dicts\n item['resources'] = []\n for x in xrange(0,4):\n resource = {}\n fields = ['head', 'desc', 'icon', 'link']\n for field in fields:\n field_name = 'resource_%s_%s' % (str(x), field)\n if field_name in custom_fields and custom_fields[field_name][0] != '':\n if field == 'link':\n resource[field] = custom_fields[field_name]\n else:\n resource[field] = custom_fields[field_name][0]\n \n if resource:\n item['resources'].append(resource)\n\n return item\n\n", "path": "_lib/wordpress_office_processor.py"}]}
1024
263
gh_patches_debug_27775
rasdani/github-patches
git_diff
bridgecrewio__checkov-4917
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Checkov Python error - kubernetes_pod_v1 I get the following error when parsing a **kubernetes_pod_v1** resource: https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/pod_v1 ``` Error: -18 02:46:45,476 [MainThread ] [ERROR] Failed to run check CKV_K8S_[27](https://github.com/technology-services-and-platforms-accnz/dotc-aks/actions/runs/4728024195/jobs/8389176473#step:21:28) on /tfplan.json:kubernetes_pod_v1.test Traceback (most recent call last): File "/usr/local/lib/python3.10/site-packages/checkov/common/checks/base_check.py", line 73, in run check_result["result"] = self.scan_entity_conf(entity_configuration, entity_type) File "/usr/local/lib/python3.10/site-packages/checkov/terraform/checks/resource/base_resource_check.py", line 43, in scan_entity_conf return self.scan_resource_conf(conf) File "/usr/local/lib/python3.10/site-packages/checkov/terraform/checks/resource/kubernetes/DockerSocketVolume.py", line 36, in scan_resource_conf if v.get("host_path"): File "/usr/local/lib/python3.10/site-packages/checkov/common/parsers/node.py", line 189, in __getattr__ raise TemplateAttributeError(f'***name*** is invalid') checkov.common.parsers.node.TemplateAttributeError: get is invalid [...] ``` For all the checks that fail. Checkov Version: :2.3.165 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checkov/terraform/checks/resource/kubernetes/DockerSocketVolume.py` Content: ``` 1 from __future__ import annotations 2 3 from typing import Any 4 5 from checkov.common.models.enums import CheckCategories, CheckResult 6 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck 7 8 9 class DockerSocketVolume(BaseResourceCheck): 10 def __init__(self) -> None: 11 # Exposing the socket gives container information and increases risk of exploit 12 # read-only is not a solution but only makes it harder to exploit. 
13 # Location: Pod.spec.volumes[].hostPath.path 14 # Location: CronJob.spec.jobTemplate.spec.template.spec.volumes[].hostPath.path 15 # Location: *.spec.template.spec.volumes[].hostPath.path 16 id = "CKV_K8S_27" 17 name = "Do not expose the docker daemon socket to containers" 18 supported_resources = ("kubernetes_pod", "kubernetes_pod_v1", 19 "kubernetes_deployment", "kubernetes_deployment_v1", 20 "kubernetes_daemonset", "kubernetes_daemon_set_v1") 21 categories = (CheckCategories.NETWORKING,) 22 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 23 24 def scan_resource_conf(self, conf: dict[str, list[Any]]): 25 if "spec" not in conf: 26 self.evaluated_keys = [""] 27 return CheckResult.FAILED 28 29 spec = conf['spec'][0] 30 if not spec: 31 return CheckResult.UNKNOWN 32 33 if "volume" in spec and spec.get("volume"): 34 volumes = spec.get("volume") 35 for idx, v in enumerate(volumes): 36 if v.get("host_path"): 37 if "path" in v["host_path"][0]: 38 if v["host_path"][0]["path"] == ["/var/run/docker.sock"]: 39 self.evaluated_keys = [f"spec/volume/{idx}/host_path/[0]/path"] 40 return CheckResult.FAILED 41 if "template" in spec and spec.get("template"): 42 template = spec.get("template")[0] 43 if "spec" in template: 44 temp_spec = template.get("spec")[0] 45 if "volume" in temp_spec and temp_spec.get("volume"): 46 volumes = temp_spec.get("volume") 47 for idx, v in enumerate(volumes): 48 if isinstance(v, dict) and v.get("host_path"): 49 if "path" in v["host_path"][0]: 50 path = v["host_path"][0]["path"] 51 if path == ["/var/run/docker.sock"]: 52 self.evaluated_keys = [f"spec/template/spec/volume/{idx}/host_path/[0]/path"] 53 return CheckResult.FAILED 54 55 return CheckResult.PASSED 56 57 58 check = DockerSocketVolume() 59 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/terraform/checks/resource/kubernetes/DockerSocketVolume.py b/checkov/terraform/checks/resource/kubernetes/DockerSocketVolume.py --- a/checkov/terraform/checks/resource/kubernetes/DockerSocketVolume.py +++ b/checkov/terraform/checks/resource/kubernetes/DockerSocketVolume.py @@ -33,7 +33,7 @@ if "volume" in spec and spec.get("volume"): volumes = spec.get("volume") for idx, v in enumerate(volumes): - if v.get("host_path"): + if isinstance(v, dict) and v.get("host_path"): if "path" in v["host_path"][0]: if v["host_path"][0]["path"] == ["/var/run/docker.sock"]: self.evaluated_keys = [f"spec/volume/{idx}/host_path/[0]/path"] @@ -47,8 +47,7 @@ for idx, v in enumerate(volumes): if isinstance(v, dict) and v.get("host_path"): if "path" in v["host_path"][0]: - path = v["host_path"][0]["path"] - if path == ["/var/run/docker.sock"]: + if v["host_path"][0]["path"] == ["/var/run/docker.sock"]: self.evaluated_keys = [f"spec/template/spec/volume/{idx}/host_path/[0]/path"] return CheckResult.FAILED
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/kubernetes/DockerSocketVolume.py b/checkov/terraform/checks/resource/kubernetes/DockerSocketVolume.py\n--- a/checkov/terraform/checks/resource/kubernetes/DockerSocketVolume.py\n+++ b/checkov/terraform/checks/resource/kubernetes/DockerSocketVolume.py\n@@ -33,7 +33,7 @@\n if \"volume\" in spec and spec.get(\"volume\"):\n volumes = spec.get(\"volume\")\n for idx, v in enumerate(volumes):\n- if v.get(\"host_path\"):\n+ if isinstance(v, dict) and v.get(\"host_path\"):\n if \"path\" in v[\"host_path\"][0]:\n if v[\"host_path\"][0][\"path\"] == [\"/var/run/docker.sock\"]:\n self.evaluated_keys = [f\"spec/volume/{idx}/host_path/[0]/path\"]\n@@ -47,8 +47,7 @@\n for idx, v in enumerate(volumes):\n if isinstance(v, dict) and v.get(\"host_path\"):\n if \"path\" in v[\"host_path\"][0]:\n- path = v[\"host_path\"][0][\"path\"]\n- if path == [\"/var/run/docker.sock\"]:\n+ if v[\"host_path\"][0][\"path\"] == [\"/var/run/docker.sock\"]:\n self.evaluated_keys = [f\"spec/template/spec/volume/{idx}/host_path/[0]/path\"]\n return CheckResult.FAILED\n", "issue": "Checkov Python error - kubernetes_pod_v1\nI get the following error when parsing a **kubernetes_pod_v1** resource:\r\nhttps://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/pod_v1\r\n\r\n```\r\nError: -18 02:46:45,476 [MainThread ] [ERROR] Failed to run check CKV_K8S_[27](https://github.com/technology-services-and-platforms-accnz/dotc-aks/actions/runs/4728024195/jobs/8389176473#step:21:28) on /tfplan.json:kubernetes_pod_v1.test\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.10/site-packages/checkov/common/checks/base_check.py\", line 73, in run\r\n check_result[\"result\"] = self.scan_entity_conf(entity_configuration, entity_type)\r\n File \"/usr/local/lib/python3.10/site-packages/checkov/terraform/checks/resource/base_resource_check.py\", line 43, in scan_entity_conf\r\n return self.scan_resource_conf(conf)\r\n File \"/usr/local/lib/python3.10/site-packages/checkov/terraform/checks/resource/kubernetes/DockerSocketVolume.py\", line 36, in scan_resource_conf\r\n if v.get(\"host_path\"):\r\n File \"/usr/local/lib/python3.10/site-packages/checkov/common/parsers/node.py\", line 189, in __getattr__\r\n raise TemplateAttributeError(f'***name*** is invalid')\r\ncheckov.common.parsers.node.TemplateAttributeError: get is invalid\r\n[...]\r\n```\r\n\r\nFor all the checks that fail.\r\n\r\nCheckov Version: :2.3.165\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass DockerSocketVolume(BaseResourceCheck):\n def __init__(self) -> None:\n # Exposing the socket gives container information and increases risk of exploit\n # read-only is not a solution but only makes it harder to exploit.\n # Location: Pod.spec.volumes[].hostPath.path\n # Location: CronJob.spec.jobTemplate.spec.template.spec.volumes[].hostPath.path\n # Location: *.spec.template.spec.volumes[].hostPath.path\n id = \"CKV_K8S_27\"\n name = \"Do not expose the docker daemon socket to containers\"\n supported_resources = (\"kubernetes_pod\", \"kubernetes_pod_v1\",\n \"kubernetes_deployment\", \"kubernetes_deployment_v1\",\n \"kubernetes_daemonset\", \"kubernetes_daemon_set_v1\")\n categories = (CheckCategories.NETWORKING,)\n super().__init__(name=name, id=id, categories=categories, 
supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: dict[str, list[Any]]):\n if \"spec\" not in conf:\n self.evaluated_keys = [\"\"]\n return CheckResult.FAILED\n\n spec = conf['spec'][0]\n if not spec:\n return CheckResult.UNKNOWN\n\n if \"volume\" in spec and spec.get(\"volume\"):\n volumes = spec.get(\"volume\")\n for idx, v in enumerate(volumes):\n if v.get(\"host_path\"):\n if \"path\" in v[\"host_path\"][0]:\n if v[\"host_path\"][0][\"path\"] == [\"/var/run/docker.sock\"]:\n self.evaluated_keys = [f\"spec/volume/{idx}/host_path/[0]/path\"]\n return CheckResult.FAILED\n if \"template\" in spec and spec.get(\"template\"):\n template = spec.get(\"template\")[0]\n if \"spec\" in template:\n temp_spec = template.get(\"spec\")[0]\n if \"volume\" in temp_spec and temp_spec.get(\"volume\"):\n volumes = temp_spec.get(\"volume\")\n for idx, v in enumerate(volumes):\n if isinstance(v, dict) and v.get(\"host_path\"):\n if \"path\" in v[\"host_path\"][0]:\n path = v[\"host_path\"][0][\"path\"]\n if path == [\"/var/run/docker.sock\"]:\n self.evaluated_keys = [f\"spec/template/spec/volume/{idx}/host_path/[0]/path\"]\n return CheckResult.FAILED\n\n return CheckResult.PASSED\n\n\ncheck = DockerSocketVolume()\n", "path": "checkov/terraform/checks/resource/kubernetes/DockerSocketVolume.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass DockerSocketVolume(BaseResourceCheck):\n def __init__(self) -> None:\n # Exposing the socket gives container information and increases risk of exploit\n # read-only is not a solution but only makes it harder to exploit.\n # Location: Pod.spec.volumes[].hostPath.path\n # Location: CronJob.spec.jobTemplate.spec.template.spec.volumes[].hostPath.path\n # Location: *.spec.template.spec.volumes[].hostPath.path\n id = \"CKV_K8S_27\"\n name = \"Do not expose the docker daemon socket to containers\"\n supported_resources = (\"kubernetes_pod\", \"kubernetes_pod_v1\",\n \"kubernetes_deployment\", \"kubernetes_deployment_v1\",\n \"kubernetes_daemonset\", \"kubernetes_daemon_set_v1\")\n categories = (CheckCategories.NETWORKING,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: dict[str, list[Any]]):\n if \"spec\" not in conf:\n self.evaluated_keys = [\"\"]\n return CheckResult.FAILED\n\n spec = conf['spec'][0]\n if not spec:\n return CheckResult.UNKNOWN\n\n if \"volume\" in spec and spec.get(\"volume\"):\n volumes = spec.get(\"volume\")\n for idx, v in enumerate(volumes):\n if isinstance(v, dict) and v.get(\"host_path\"):\n if \"path\" in v[\"host_path\"][0]:\n if v[\"host_path\"][0][\"path\"] == [\"/var/run/docker.sock\"]:\n self.evaluated_keys = [f\"spec/volume/{idx}/host_path/[0]/path\"]\n return CheckResult.FAILED\n if \"template\" in spec and spec.get(\"template\"):\n template = spec.get(\"template\")[0]\n if \"spec\" in template:\n temp_spec = template.get(\"spec\")[0]\n if \"volume\" in temp_spec and temp_spec.get(\"volume\"):\n volumes = temp_spec.get(\"volume\")\n for idx, v in enumerate(volumes):\n if isinstance(v, dict) and v.get(\"host_path\"):\n if \"path\" in v[\"host_path\"][0]:\n if v[\"host_path\"][0][\"path\"] == [\"/var/run/docker.sock\"]:\n self.evaluated_keys = [f\"spec/template/spec/volume/{idx}/host_path/[0]/path\"]\n return 
CheckResult.FAILED\n\n return CheckResult.PASSED\n\n\ncheck = DockerSocketVolume()\n", "path": "checkov/terraform/checks/resource/kubernetes/DockerSocketVolume.py"}]}
1343
311
gh_patches_debug_14901
rasdani/github-patches
git_diff
streamlink__streamlink-2102
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ok.ru VODs <!-- Thanks for reporting a plugin issue! USE THE TEMPLATE. Otherwise your plugin issue may be rejected. First, see the contribution guidelines: https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink Also check the list of open and closed plugin issues: https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22 Please see the text preview to avoid unnecessary formatting errors. --> ## Plugin Issue <!-- Replace [ ] with [x] in order to check the box --> - [x ] This is a plugin issue and I have read the contribution guidelines. ### Description i enter link in #1884 but "https://raw.githubusercontent.com/back-to/plugins/master/plugins/ok_live.py" 404: Not Found. Thanks <!-- Explain the plugin issue as thoroughly as you can. --> ### Reproduction steps / Explicit stream URLs to test <!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. --> 1. D:\my\Streamlinkl\bin>streamlink -l debug "https://ok.ru/video/266205792931" best ### Log output <!-- TEXT LOG OUTPUT IS REQUIRED for a plugin issue! Use the `--loglevel debug` parameter and avoid using parameters which suppress log output. https://streamlink.github.io/cli.html#cmdoption-l Make sure to **remove usernames and passwords** You can copy the output to https://gist.github.com/ or paste it below. --> ``` [cli][debug] OS: Windows 8.1 [cli][debug] Python: 3.5.2 [cli][debug] Streamlink: 0.14.2 [cli][debug] Requests(2.19.1), Socks(1.6.7), Websocket(0.48.0) error: No plugin can handle URL: https://ok.ru/video/266205792931 ``` ### Additional comments, screenshots, etc. [Love Streamlink? Please consider supporting our collective. Thanks!](https://opencollective.com/streamlink/donate) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/streamlink/plugins/ok_live.py` Content: ``` 1 import re 2 3 from streamlink.plugin import Plugin 4 from streamlink.plugin.api import validate 5 from streamlink.plugin.api import useragents 6 from streamlink.stream import HLSStream 7 8 _url_re = re.compile(r"https?://(www\.)?ok\.ru/live/\d+") 9 _vod_re = re.compile(r";(?P<hlsurl>[^;]+video\.m3u8.+?)\\&quot;") 10 11 _schema = validate.Schema( 12 validate.transform(_vod_re.search), 13 validate.any( 14 None, 15 validate.all( 16 validate.get("hlsurl"), 17 validate.url() 18 ) 19 ) 20 ) 21 22 class OK_live(Plugin): 23 """ 24 Support for ok.ru live stream: http://www.ok.ru/live/ 25 """ 26 @classmethod 27 def can_handle_url(cls, url): 28 return _url_re.match(url) is not None 29 30 def _get_streams(self): 31 headers = { 32 'User-Agent': useragents.CHROME, 33 'Referer': self.url 34 } 35 36 hls = self.session.http.get(self.url, headers=headers, schema=_schema) 37 if hls: 38 hls = hls.replace(u'\\\\u0026', u'&') 39 return HLSStream.parse_variant_playlist(self.session, hls, headers=headers) 40 41 42 __plugin__ = OK_live ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/streamlink/plugins/ok_live.py b/src/streamlink/plugins/ok_live.py --- a/src/streamlink/plugins/ok_live.py +++ b/src/streamlink/plugins/ok_live.py @@ -5,7 +5,7 @@ from streamlink.plugin.api import useragents from streamlink.stream import HLSStream -_url_re = re.compile(r"https?://(www\.)?ok\.ru/live/\d+") +_url_re = re.compile(r"https?://(www\.)?ok\.ru/(live|video)/\d+") _vod_re = re.compile(r";(?P<hlsurl>[^;]+video\.m3u8.+?)\\&quot;") _schema = validate.Schema( @@ -21,7 +21,7 @@ class OK_live(Plugin): """ - Support for ok.ru live stream: http://www.ok.ru/live/ + Support for ok.ru live stream: http://www.ok.ru/live/ and for ok.ru VoDs: http://www.ok.ru/video/ """ @classmethod def can_handle_url(cls, url):
{"golden_diff": "diff --git a/src/streamlink/plugins/ok_live.py b/src/streamlink/plugins/ok_live.py\n--- a/src/streamlink/plugins/ok_live.py\n+++ b/src/streamlink/plugins/ok_live.py\n@@ -5,7 +5,7 @@\n from streamlink.plugin.api import useragents\n from streamlink.stream import HLSStream\n \n-_url_re = re.compile(r\"https?://(www\\.)?ok\\.ru/live/\\d+\")\n+_url_re = re.compile(r\"https?://(www\\.)?ok\\.ru/(live|video)/\\d+\")\n _vod_re = re.compile(r\";(?P<hlsurl>[^;]+video\\.m3u8.+?)\\\\&quot;\")\n \n _schema = validate.Schema(\n@@ -21,7 +21,7 @@\n \n class OK_live(Plugin):\n \"\"\"\n- Support for ok.ru live stream: http://www.ok.ru/live/\n+ Support for ok.ru live stream: http://www.ok.ru/live/ and for ok.ru VoDs: http://www.ok.ru/video/\n \"\"\"\n @classmethod\n def can_handle_url(cls, url):\n", "issue": "ok.ru VODs\n<!--\r\nThanks for reporting a plugin issue!\r\nUSE THE TEMPLATE. Otherwise your plugin issue may be rejected.\r\n\r\nFirst, see the contribution guidelines:\r\nhttps://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink\r\n\r\nAlso check the list of open and closed plugin issues:\r\nhttps://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22\r\n\r\nPlease see the text preview to avoid unnecessary formatting errors.\r\n-->\r\n\r\n\r\n## Plugin Issue\r\n\r\n<!-- Replace [ ] with [x] in order to check the box -->\r\n- [x ] This is a plugin issue and I have read the contribution guidelines.\r\n\r\n\r\n### Description\r\ni enter link in #1884 but \"https://raw.githubusercontent.com/back-to/plugins/master/plugins/ok_live.py\" 404: Not Found. Thanks\r\n<!-- Explain the plugin issue as thoroughly as you can. -->\r\n\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\n<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. -->\r\n\r\n1. D:\\my\\Streamlinkl\\bin>streamlink -l debug \"https://ok.ru/video/266205792931\" best\r\n\r\n\r\n\r\n### Log output\r\n\r\n<!--\r\nTEXT LOG OUTPUT IS REQUIRED for a plugin issue!\r\nUse the `--loglevel debug` parameter and avoid using parameters which suppress log output.\r\nhttps://streamlink.github.io/cli.html#cmdoption-l\r\n\r\nMake sure to **remove usernames and passwords**\r\nYou can copy the output to https://gist.github.com/ or paste it below.\r\n-->\r\n\r\n```\r\n[cli][debug] OS: Windows 8.1\r\n[cli][debug] Python: 3.5.2\r\n[cli][debug] Streamlink: 0.14.2\r\n[cli][debug] Requests(2.19.1), Socks(1.6.7), Websocket(0.48.0)\r\nerror: No plugin can handle URL: https://ok.ru/video/266205792931\r\n\r\n```\r\n\r\n\r\n### Additional comments, screenshots, etc.\r\n\r\n\r\n\r\n[Love Streamlink? Please consider supporting our collective. 
Thanks!](https://opencollective.com/streamlink/donate)\r\n\n", "before_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import validate\nfrom streamlink.plugin.api import useragents\nfrom streamlink.stream import HLSStream\n\n_url_re = re.compile(r\"https?://(www\\.)?ok\\.ru/live/\\d+\")\n_vod_re = re.compile(r\";(?P<hlsurl>[^;]+video\\.m3u8.+?)\\\\&quot;\")\n\n_schema = validate.Schema(\n validate.transform(_vod_re.search),\n validate.any(\n None,\n validate.all(\n validate.get(\"hlsurl\"),\n validate.url()\n )\n )\n)\n\nclass OK_live(Plugin):\n \"\"\"\n Support for ok.ru live stream: http://www.ok.ru/live/\n \"\"\"\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url) is not None\n\n def _get_streams(self):\n headers = {\n 'User-Agent': useragents.CHROME,\n 'Referer': self.url\n }\n\n hls = self.session.http.get(self.url, headers=headers, schema=_schema)\n if hls:\n hls = hls.replace(u'\\\\\\\\u0026', u'&')\n return HLSStream.parse_variant_playlist(self.session, hls, headers=headers)\n\n\n__plugin__ = OK_live", "path": "src/streamlink/plugins/ok_live.py"}], "after_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import validate\nfrom streamlink.plugin.api import useragents\nfrom streamlink.stream import HLSStream\n\n_url_re = re.compile(r\"https?://(www\\.)?ok\\.ru/(live|video)/\\d+\")\n_vod_re = re.compile(r\";(?P<hlsurl>[^;]+video\\.m3u8.+?)\\\\&quot;\")\n\n_schema = validate.Schema(\n validate.transform(_vod_re.search),\n validate.any(\n None,\n validate.all(\n validate.get(\"hlsurl\"),\n validate.url()\n )\n )\n)\n\nclass OK_live(Plugin):\n \"\"\"\n Support for ok.ru live stream: http://www.ok.ru/live/ and for ok.ru VoDs: http://www.ok.ru/video/\n \"\"\"\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url) is not None\n\n def _get_streams(self):\n headers = {\n 'User-Agent': useragents.CHROME,\n 'Referer': self.url\n }\n\n hls = self.session.http.get(self.url, headers=headers, schema=_schema)\n if hls:\n hls = hls.replace(u'\\\\\\\\u0026', u'&')\n return HLSStream.parse_variant_playlist(self.session, hls, headers=headers)\n\n\n__plugin__ = OK_live", "path": "src/streamlink/plugins/ok_live.py"}]}
1122
239
gh_patches_debug_11245
rasdani/github-patches
git_diff
sunpy__sunpy-4596
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Rethinking and rewriting sunpy.self_test We are currently using astropy's test runner for `sunpy.self_test` this was really designed for setup.py and is therefore very full of features which are probably not needed for self_test. Before we (I) go deleting swathes of code as I love to do. What do we want to achieve with self test? Is a very slim wrapper around `pytest --pyargs sunpy` all we need? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sunpy/__init__.py` Content: ``` 1 """ 2 SunPy 3 ===== 4 5 An open-source Python library for Solar Physics data analysis. 6 7 * Homepage: https://sunpy.org 8 * Documentation: https://docs.sunpy.org/en/stable/ 9 """ 10 import os 11 import sys 12 import logging 13 14 from sunpy.tests.runner import SunPyTestRunner 15 from sunpy.util import system_info 16 from sunpy.util.config import load_config, print_config 17 from sunpy.util.logger import _init_log 18 from .version import version as __version__ 19 20 # Enforce Python version check during package import. 21 __minimum_python_version__ = "3.7" 22 23 24 class UnsupportedPythonError(Exception): 25 """Running on an unsupported version of Python.""" 26 27 28 if sys.version_info < tuple(int(val) for val in __minimum_python_version__.split('.')): 29 # This has to be .format to keep backwards compatibly. 30 raise UnsupportedPythonError( 31 "sunpy does not support Python < {}".format(__minimum_python_version__)) 32 33 34 def _get_bibtex(): 35 import textwrap 36 37 # Set the bibtex entry to the article referenced in CITATION.rst 38 citation_file = os.path.join(os.path.dirname(__file__), 'CITATION.rst') 39 40 # Explicitly specify UTF-8 encoding in case the system's default encoding is problematic 41 with open(citation_file, 'r', encoding='utf-8') as citation: 42 # Extract the first bibtex block: 43 ref = citation.read().partition(".. code:: bibtex\n\n")[2] 44 lines = ref.split("\n") 45 # Only read the lines which are indented 46 lines = lines[:[l.startswith(" ") for l in lines].index(False)] 47 ref = textwrap.dedent('\n'.join(lines)) 48 return ref 49 50 51 __citation__ = __bibtex__ = _get_bibtex() 52 53 self_test = SunPyTestRunner.make_test_runner_in(os.path.dirname(__file__)) 54 55 # Load user configuration 56 config = load_config() 57 58 log = _init_log(config=config) 59 60 __all__ = ['config', 'self_test', 'system_info', 'print_config'] 61 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sunpy/__init__.py b/sunpy/__init__.py --- a/sunpy/__init__.py +++ b/sunpy/__init__.py @@ -11,7 +11,7 @@ import sys import logging -from sunpy.tests.runner import SunPyTestRunner +from sunpy.tests.self_test import self_test from sunpy.util import system_info from sunpy.util.config import load_config, print_config from sunpy.util.logger import _init_log @@ -50,8 +50,6 @@ __citation__ = __bibtex__ = _get_bibtex() -self_test = SunPyTestRunner.make_test_runner_in(os.path.dirname(__file__)) - # Load user configuration config = load_config()
{"golden_diff": "diff --git a/sunpy/__init__.py b/sunpy/__init__.py\n--- a/sunpy/__init__.py\n+++ b/sunpy/__init__.py\n@@ -11,7 +11,7 @@\n import sys\n import logging\n \n-from sunpy.tests.runner import SunPyTestRunner\n+from sunpy.tests.self_test import self_test\n from sunpy.util import system_info\n from sunpy.util.config import load_config, print_config\n from sunpy.util.logger import _init_log\n@@ -50,8 +50,6 @@\n \n __citation__ = __bibtex__ = _get_bibtex()\n \n-self_test = SunPyTestRunner.make_test_runner_in(os.path.dirname(__file__))\n-\n # Load user configuration\n config = load_config()\n", "issue": "Rethinking and rewriting sunpy.self_test\nWe are currently using astropy's test runner for `sunpy.self_test` this was really designed for setup.py and is therefore very full of features which are probably not needed for self_test.\n\nBefore we (I) go deleting swathes of code as I love to do. What do we want to achieve with self test? Is a very slim wrapper around `pytest --pyargs sunpy` all we need?\n", "before_files": [{"content": "\"\"\"\nSunPy\n=====\n\nAn open-source Python library for Solar Physics data analysis.\n\n* Homepage: https://sunpy.org\n* Documentation: https://docs.sunpy.org/en/stable/\n\"\"\"\nimport os\nimport sys\nimport logging\n\nfrom sunpy.tests.runner import SunPyTestRunner\nfrom sunpy.util import system_info\nfrom sunpy.util.config import load_config, print_config\nfrom sunpy.util.logger import _init_log\nfrom .version import version as __version__\n\n# Enforce Python version check during package import.\n__minimum_python_version__ = \"3.7\"\n\n\nclass UnsupportedPythonError(Exception):\n \"\"\"Running on an unsupported version of Python.\"\"\"\n\n\nif sys.version_info < tuple(int(val) for val in __minimum_python_version__.split('.')):\n # This has to be .format to keep backwards compatibly.\n raise UnsupportedPythonError(\n \"sunpy does not support Python < {}\".format(__minimum_python_version__))\n\n\ndef _get_bibtex():\n import textwrap\n\n # Set the bibtex entry to the article referenced in CITATION.rst\n citation_file = os.path.join(os.path.dirname(__file__), 'CITATION.rst')\n\n # Explicitly specify UTF-8 encoding in case the system's default encoding is problematic\n with open(citation_file, 'r', encoding='utf-8') as citation:\n # Extract the first bibtex block:\n ref = citation.read().partition(\".. 
code:: bibtex\\n\\n\")[2]\n lines = ref.split(\"\\n\")\n # Only read the lines which are indented\n lines = lines[:[l.startswith(\" \") for l in lines].index(False)]\n ref = textwrap.dedent('\\n'.join(lines))\n return ref\n\n\n__citation__ = __bibtex__ = _get_bibtex()\n\nself_test = SunPyTestRunner.make_test_runner_in(os.path.dirname(__file__))\n\n# Load user configuration\nconfig = load_config()\n\nlog = _init_log(config=config)\n\n__all__ = ['config', 'self_test', 'system_info', 'print_config']\n", "path": "sunpy/__init__.py"}], "after_files": [{"content": "\"\"\"\nSunPy\n=====\n\nAn open-source Python library for Solar Physics data analysis.\n\n* Homepage: https://sunpy.org\n* Documentation: https://docs.sunpy.org/en/stable/\n\"\"\"\nimport os\nimport sys\nimport logging\n\nfrom sunpy.tests.self_test import self_test\nfrom sunpy.util import system_info\nfrom sunpy.util.config import load_config, print_config\nfrom sunpy.util.logger import _init_log\nfrom .version import version as __version__\n\n# Enforce Python version check during package import.\n__minimum_python_version__ = \"3.7\"\n\n\nclass UnsupportedPythonError(Exception):\n \"\"\"Running on an unsupported version of Python.\"\"\"\n\n\nif sys.version_info < tuple(int(val) for val in __minimum_python_version__.split('.')):\n # This has to be .format to keep backwards compatibly.\n raise UnsupportedPythonError(\n \"sunpy does not support Python < {}\".format(__minimum_python_version__))\n\n\ndef _get_bibtex():\n import textwrap\n\n # Set the bibtex entry to the article referenced in CITATION.rst\n citation_file = os.path.join(os.path.dirname(__file__), 'CITATION.rst')\n\n # Explicitly specify UTF-8 encoding in case the system's default encoding is problematic\n with open(citation_file, 'r', encoding='utf-8') as citation:\n # Extract the first bibtex block:\n ref = citation.read().partition(\".. code:: bibtex\\n\\n\")[2]\n lines = ref.split(\"\\n\")\n # Only read the lines which are indented\n lines = lines[:[l.startswith(\" \") for l in lines].index(False)]\n ref = textwrap.dedent('\\n'.join(lines))\n return ref\n\n\n__citation__ = __bibtex__ = _get_bibtex()\n\n# Load user configuration\nconfig = load_config()\n\nlog = _init_log(config=config)\n\n__all__ = ['config', 'self_test', 'system_info', 'print_config']\n", "path": "sunpy/__init__.py"}]}
919
168
gh_patches_debug_30401
rasdani/github-patches
git_diff
castorini__pyserini-630
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add ability to select random question for interactive demo hey @saileshnankani - how about we add a `/random` command to ask a random question from the dev set? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pyserini/demo/msmarco.py` Content: ``` 1 # 2 # Pyserini: Reproducible IR research with sparse and dense representations 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 # 16 17 import cmd 18 import json 19 20 from pyserini.search import SimpleSearcher 21 from pyserini.dsearch import SimpleDenseSearcher, TctColBertQueryEncoder, AnceQueryEncoder 22 from pyserini.hsearch import HybridSearcher 23 24 25 class MsMarcoDemo(cmd.Cmd): 26 ssearcher = SimpleSearcher.from_prebuilt_index('msmarco-passage') 27 dsearcher = None 28 hsearcher = None 29 searcher = ssearcher 30 31 k = 10 32 prompt = '>>> ' 33 34 # https://stackoverflow.com/questions/35213134/command-prefixes-in-python-cli-using-cmd-in-pythons-standard-library 35 def precmd(self, line): 36 if line[0] == '/': 37 line = line[1:] 38 return line 39 40 def do_help(self, arg): 41 print(f'/help : returns this message') 42 print(f'/k [NUM] : sets k (number of hits to return) to [NUM]') 43 print(f'/model [MODEL] : sets encoder to use the model [MODEL] (one of tct, ance)') 44 print(f'/mode [MODE] : sets retriver type to [MODE] (one of sparse, dense, hybrid)') 45 46 def do_k(self, arg): 47 print(f'setting k = {int(arg)}') 48 self.k = int(arg) 49 50 def do_mode(self, arg): 51 if arg == "sparse": 52 self.searcher = self.ssearcher 53 elif arg == "dense": 54 if self.dsearcher is None: 55 print(f'Specify model through /model before using dense retrieval.') 56 return 57 self.searcher = self.dsearcher 58 elif arg == "hybrid": 59 if self.hsearcher is None: 60 print(f'Specify model through /model before using hybrid retrieval.') 61 return 62 self.searcher = self.hsearcher 63 else: 64 print( 65 f'Mode "{arg}" is invalid. Mode should be one of [sparse, dense, hybrid].') 66 return 67 print(f'setting retriver = {arg}') 68 69 def do_model(self, arg): 70 if arg == "tct": 71 encoder = TctColBertQueryEncoder("castorini/tct_colbert-msmarco") 72 index = "msmarco-passage-tct_colbert-hnsw" 73 elif arg == "ance": 74 encoder = AnceQueryEncoder("castorini/ance-msmarco-passage") 75 index = "msmarco-passage-ance-bf" 76 else: 77 print( 78 f'Model "{arg}" is invalid. 
Model should be one of [tct, ance].') 79 return 80 81 self.dsearcher = SimpleDenseSearcher.from_prebuilt_index( 82 index, 83 encoder 84 ) 85 self.hsearcher = HybridSearcher(self.dsearcher, self.ssearcher) 86 print(f'setting model = {arg}') 87 88 def do_EOF(self, line): 89 return True 90 91 def default(self, q): 92 hits = self.searcher.search(q, self.k) 93 94 for i in range(0, len(hits)): 95 raw_doc = None 96 if isinstance(self.searcher, SimpleSearcher): 97 raw_doc = hits[i].raw 98 else: 99 doc = self.ssearcher.doc(hits[i].docid) 100 if doc: 101 raw_doc = doc.raw() 102 jsondoc = json.loads(raw_doc) 103 print(f'{i + 1:2} {hits[i].score:.5f} {jsondoc["contents"]}') 104 105 106 if __name__ == '__main__': 107 MsMarcoDemo().cmdloop() 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pyserini/demo/msmarco.py b/pyserini/demo/msmarco.py --- a/pyserini/demo/msmarco.py +++ b/pyserini/demo/msmarco.py @@ -16,13 +16,18 @@ import cmd import json +import os +import random from pyserini.search import SimpleSearcher from pyserini.dsearch import SimpleDenseSearcher, TctColBertQueryEncoder, AnceQueryEncoder from pyserini.hsearch import HybridSearcher +from pyserini import search class MsMarcoDemo(cmd.Cmd): + dev_topics = list(search.get_topics('msmarco-passage-dev-subset').values()) + ssearcher = SimpleSearcher.from_prebuilt_index('msmarco-passage') dsearcher = None hsearcher = None @@ -42,6 +47,7 @@ print(f'/k [NUM] : sets k (number of hits to return) to [NUM]') print(f'/model [MODEL] : sets encoder to use the model [MODEL] (one of tct, ance)') print(f'/mode [MODE] : sets retriver type to [MODE] (one of sparse, dense, hybrid)') + print(f'/random : returns results for a random question from dev subset') def do_k(self, arg): print(f'setting k = {int(arg)}') @@ -85,6 +91,11 @@ self.hsearcher = HybridSearcher(self.dsearcher, self.ssearcher) print(f'setting model = {arg}') + def do_random(self, arg): + q = random.choice(self.dev_topics)['title'] + print(f'question: {q}') + self.default(q) + def do_EOF(self, line): return True
{"golden_diff": "diff --git a/pyserini/demo/msmarco.py b/pyserini/demo/msmarco.py\n--- a/pyserini/demo/msmarco.py\n+++ b/pyserini/demo/msmarco.py\n@@ -16,13 +16,18 @@\n \n import cmd\n import json\n+import os\n+import random\n \n from pyserini.search import SimpleSearcher\n from pyserini.dsearch import SimpleDenseSearcher, TctColBertQueryEncoder, AnceQueryEncoder\n from pyserini.hsearch import HybridSearcher\n+from pyserini import search\n \n \n class MsMarcoDemo(cmd.Cmd):\n+ dev_topics = list(search.get_topics('msmarco-passage-dev-subset').values())\n+\n ssearcher = SimpleSearcher.from_prebuilt_index('msmarco-passage')\n dsearcher = None\n hsearcher = None\n@@ -42,6 +47,7 @@\n print(f'/k [NUM] : sets k (number of hits to return) to [NUM]')\n print(f'/model [MODEL] : sets encoder to use the model [MODEL] (one of tct, ance)')\n print(f'/mode [MODE] : sets retriver type to [MODE] (one of sparse, dense, hybrid)')\n+ print(f'/random : returns results for a random question from dev subset')\n \n def do_k(self, arg):\n print(f'setting k = {int(arg)}')\n@@ -85,6 +91,11 @@\n self.hsearcher = HybridSearcher(self.dsearcher, self.ssearcher)\n print(f'setting model = {arg}')\n \n+ def do_random(self, arg):\n+ q = random.choice(self.dev_topics)['title']\n+ print(f'question: {q}')\n+ self.default(q)\n+\n def do_EOF(self, line):\n return True\n", "issue": "Add ability to select random question for interactive demo\nhey @saileshnankani - how about we add a `/random` command to ask a random question from the dev set?\n", "before_files": [{"content": "#\n# Pyserini: Reproducible IR research with sparse and dense representations\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport cmd\nimport json\n\nfrom pyserini.search import SimpleSearcher\nfrom pyserini.dsearch import SimpleDenseSearcher, TctColBertQueryEncoder, AnceQueryEncoder\nfrom pyserini.hsearch import HybridSearcher\n\n\nclass MsMarcoDemo(cmd.Cmd):\n ssearcher = SimpleSearcher.from_prebuilt_index('msmarco-passage')\n dsearcher = None\n hsearcher = None\n searcher = ssearcher\n\n k = 10\n prompt = '>>> '\n\n # https://stackoverflow.com/questions/35213134/command-prefixes-in-python-cli-using-cmd-in-pythons-standard-library\n def precmd(self, line):\n if line[0] == '/':\n line = line[1:]\n return line\n\n def do_help(self, arg):\n print(f'/help : returns this message')\n print(f'/k [NUM] : sets k (number of hits to return) to [NUM]')\n print(f'/model [MODEL] : sets encoder to use the model [MODEL] (one of tct, ance)')\n print(f'/mode [MODE] : sets retriver type to [MODE] (one of sparse, dense, hybrid)')\n\n def do_k(self, arg):\n print(f'setting k = {int(arg)}')\n self.k = int(arg)\n\n def do_mode(self, arg):\n if arg == \"sparse\":\n self.searcher = self.ssearcher\n elif arg == \"dense\":\n if self.dsearcher is None:\n print(f'Specify model through /model before using dense retrieval.')\n return\n self.searcher = self.dsearcher\n elif arg == \"hybrid\":\n if self.hsearcher is None:\n print(f'Specify model through /model before using hybrid 
retrieval.')\n return\n self.searcher = self.hsearcher\n else:\n print(\n f'Mode \"{arg}\" is invalid. Mode should be one of [sparse, dense, hybrid].')\n return\n print(f'setting retriver = {arg}')\n\n def do_model(self, arg):\n if arg == \"tct\":\n encoder = TctColBertQueryEncoder(\"castorini/tct_colbert-msmarco\")\n index = \"msmarco-passage-tct_colbert-hnsw\"\n elif arg == \"ance\":\n encoder = AnceQueryEncoder(\"castorini/ance-msmarco-passage\")\n index = \"msmarco-passage-ance-bf\"\n else:\n print(\n f'Model \"{arg}\" is invalid. Model should be one of [tct, ance].')\n return\n\n self.dsearcher = SimpleDenseSearcher.from_prebuilt_index(\n index,\n encoder\n )\n self.hsearcher = HybridSearcher(self.dsearcher, self.ssearcher)\n print(f'setting model = {arg}')\n\n def do_EOF(self, line):\n return True\n\n def default(self, q):\n hits = self.searcher.search(q, self.k)\n\n for i in range(0, len(hits)):\n raw_doc = None\n if isinstance(self.searcher, SimpleSearcher):\n raw_doc = hits[i].raw\n else:\n doc = self.ssearcher.doc(hits[i].docid)\n if doc:\n raw_doc = doc.raw()\n jsondoc = json.loads(raw_doc)\n print(f'{i + 1:2} {hits[i].score:.5f} {jsondoc[\"contents\"]}')\n\n\nif __name__ == '__main__':\n MsMarcoDemo().cmdloop()\n", "path": "pyserini/demo/msmarco.py"}], "after_files": [{"content": "#\n# Pyserini: Reproducible IR research with sparse and dense representations\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport cmd\nimport json\nimport os\nimport random\n\nfrom pyserini.search import SimpleSearcher\nfrom pyserini.dsearch import SimpleDenseSearcher, TctColBertQueryEncoder, AnceQueryEncoder\nfrom pyserini.hsearch import HybridSearcher\nfrom pyserini import search\n\n\nclass MsMarcoDemo(cmd.Cmd):\n dev_topics = list(search.get_topics('msmarco-passage-dev-subset').values())\n\n ssearcher = SimpleSearcher.from_prebuilt_index('msmarco-passage')\n dsearcher = None\n hsearcher = None\n searcher = ssearcher\n\n k = 10\n prompt = '>>> '\n\n # https://stackoverflow.com/questions/35213134/command-prefixes-in-python-cli-using-cmd-in-pythons-standard-library\n def precmd(self, line):\n if line[0] == '/':\n line = line[1:]\n return line\n\n def do_help(self, arg):\n print(f'/help : returns this message')\n print(f'/k [NUM] : sets k (number of hits to return) to [NUM]')\n print(f'/model [MODEL] : sets encoder to use the model [MODEL] (one of tct, ance)')\n print(f'/mode [MODE] : sets retriver type to [MODE] (one of sparse, dense, hybrid)')\n print(f'/random : returns results for a random question from dev subset')\n\n def do_k(self, arg):\n print(f'setting k = {int(arg)}')\n self.k = int(arg)\n\n def do_mode(self, arg):\n if arg == \"sparse\":\n self.searcher = self.ssearcher\n elif arg == \"dense\":\n if self.dsearcher is None:\n print(f'Specify model through /model before using dense retrieval.')\n return\n self.searcher = self.dsearcher\n elif arg == \"hybrid\":\n if self.hsearcher is None:\n print(f'Specify model through /model before using hybrid retrieval.')\n return\n 
self.searcher = self.hsearcher\n else:\n print(\n f'Mode \"{arg}\" is invalid. Mode should be one of [sparse, dense, hybrid].')\n return\n print(f'setting retriver = {arg}')\n\n def do_model(self, arg):\n if arg == \"tct\":\n encoder = TctColBertQueryEncoder(\"castorini/tct_colbert-msmarco\")\n index = \"msmarco-passage-tct_colbert-hnsw\"\n elif arg == \"ance\":\n encoder = AnceQueryEncoder(\"castorini/ance-msmarco-passage\")\n index = \"msmarco-passage-ance-bf\"\n else:\n print(\n f'Model \"{arg}\" is invalid. Model should be one of [tct, ance].')\n return\n\n self.dsearcher = SimpleDenseSearcher.from_prebuilt_index(\n index,\n encoder\n )\n self.hsearcher = HybridSearcher(self.dsearcher, self.ssearcher)\n print(f'setting model = {arg}')\n\n def do_random(self, arg):\n q = random.choice(self.dev_topics)['title']\n print(f'question: {q}')\n self.default(q)\n\n def do_EOF(self, line):\n return True\n\n def default(self, q):\n hits = self.searcher.search(q, self.k)\n\n for i in range(0, len(hits)):\n raw_doc = None\n if isinstance(self.searcher, SimpleSearcher):\n raw_doc = hits[i].raw\n else:\n doc = self.ssearcher.doc(hits[i].docid)\n if doc:\n raw_doc = doc.raw()\n jsondoc = json.loads(raw_doc)\n print(f'{i + 1:2} {hits[i].score:.5f} {jsondoc[\"contents\"]}')\n\n\nif __name__ == '__main__':\n MsMarcoDemo().cmdloop()\n", "path": "pyserini/demo/msmarco.py"}]}
1449
408
gh_patches_debug_4757
rasdani/github-patches
git_diff
nonebot__nonebot2-2537
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bug: websockets 驱动器连接关闭 code 不存在 ### 操作系统 Windows ### Python 版本 3.11.6 ### NoneBot 版本 2.1.2 ### 适配器 nonebot-adapter-kaiheila 0.3.0 ### 协议端 kook API(websockets) ### 描述问题 在nonebot库的websockets.py模块中,处理WebSocket异常时出现了AttributeError。这个问题发生在尝试处理ConnectionClosed异常的过程中。 异常信息: `AttributeError: 'NoneType' object has no attribute 'code'` 相关代码: ```python def catch_closed(func: Callable[P, Awaitable[T]]) -> Callable[P, Awaitable[T]]: @wraps(func) async def decorator(*args: P.args, **kwargs: P.kwargs) -> T: try: return await func(*args, **kwargs) except ConnectionClosed as e: if e.rcvd_then_sent: raise WebSocketClosed(e.rcvd.code, e.rcvd.reason) # type: ignore else: raise WebSocketClosed(e.sent.code, e.sent.reason) # type: ignore return decorator ``` 位于:`nonebot/drivers/websockets.py` Line 56 这个问题是在捕获ConnectionClosed异常时发生的,但e.rcvd或e.sent对象可能为None(在websocket超时这种不是由关闭帧影响的情况下会都不存在)。这导致尝试访问NoneType对象的code属性,从而引发了AttributeError。 ### 复现步骤 1.在环境下加载websockets adapter 2.在插件的event handler中存在不使用异步的长时间等待情形(在我的例子是等待语言模型的计算) ### 期望的结果 按照上述描述修改此bug ### 截图或日志 无 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nonebot/drivers/websockets.py` Content: ``` 1 """[websockets](https://websockets.readthedocs.io/) 驱动适配 2 3 ```bash 4 nb driver install websockets 5 # 或者 6 pip install nonebot2[websockets] 7 ``` 8 9 :::tip 提示 10 本驱动仅支持客户端 WebSocket 连接 11 ::: 12 13 FrontMatter: 14 sidebar_position: 4 15 description: nonebot.drivers.websockets 模块 16 """ 17 18 import logging 19 from functools import wraps 20 from contextlib import asynccontextmanager 21 from typing_extensions import ParamSpec, override 22 from typing import TYPE_CHECKING, Union, TypeVar, Callable, Awaitable, AsyncGenerator 23 24 from nonebot.drivers import Request 25 from nonebot.log import LoguruHandler 26 from nonebot.exception import WebSocketClosed 27 from nonebot.drivers.none import Driver as NoneDriver 28 from nonebot.drivers import WebSocket as BaseWebSocket 29 from nonebot.drivers import WebSocketClientMixin, combine_driver 30 31 try: 32 from websockets.exceptions import ConnectionClosed 33 from websockets.legacy.client import Connect, WebSocketClientProtocol 34 except ModuleNotFoundError as e: # pragma: no cover 35 raise ImportError( 36 "Please install websockets first to use this driver. 
" 37 "Install with pip: `pip install nonebot2[websockets]`" 38 ) from e 39 40 T = TypeVar("T") 41 P = ParamSpec("P") 42 43 logger = logging.Logger("websockets.client", "INFO") 44 logger.addHandler(LoguruHandler()) 45 46 47 def catch_closed(func: Callable[P, Awaitable[T]]) -> Callable[P, Awaitable[T]]: 48 @wraps(func) 49 async def decorator(*args: P.args, **kwargs: P.kwargs) -> T: 50 try: 51 return await func(*args, **kwargs) 52 except ConnectionClosed as e: 53 if e.rcvd_then_sent: 54 raise WebSocketClosed(e.rcvd.code, e.rcvd.reason) # type: ignore 55 else: 56 raise WebSocketClosed(e.sent.code, e.sent.reason) # type: ignore 57 58 return decorator 59 60 61 class Mixin(WebSocketClientMixin): 62 """Websockets Mixin""" 63 64 @property 65 @override 66 def type(self) -> str: 67 return "websockets" 68 69 @override 70 @asynccontextmanager 71 async def websocket(self, setup: Request) -> AsyncGenerator["WebSocket", None]: 72 connection = Connect( 73 str(setup.url), 74 extra_headers={**setup.headers, **setup.cookies.as_header(setup)}, 75 open_timeout=setup.timeout, 76 ) 77 async with connection as ws: 78 yield WebSocket(request=setup, websocket=ws) 79 80 81 class WebSocket(BaseWebSocket): 82 """Websockets WebSocket Wrapper""" 83 84 @override 85 def __init__(self, *, request: Request, websocket: WebSocketClientProtocol): 86 super().__init__(request=request) 87 self.websocket = websocket 88 89 @property 90 @override 91 def closed(self) -> bool: 92 return self.websocket.closed 93 94 @override 95 async def accept(self): 96 raise NotImplementedError 97 98 @override 99 async def close(self, code: int = 1000, reason: str = ""): 100 await self.websocket.close(code, reason) 101 102 @override 103 @catch_closed 104 async def receive(self) -> Union[str, bytes]: 105 return await self.websocket.recv() 106 107 @override 108 @catch_closed 109 async def receive_text(self) -> str: 110 msg = await self.websocket.recv() 111 if isinstance(msg, bytes): 112 raise TypeError("WebSocket received unexpected frame type: bytes") 113 return msg 114 115 @override 116 @catch_closed 117 async def receive_bytes(self) -> bytes: 118 msg = await self.websocket.recv() 119 if isinstance(msg, str): 120 raise TypeError("WebSocket received unexpected frame type: str") 121 return msg 122 123 @override 124 async def send_text(self, data: str) -> None: 125 await self.websocket.send(data) 126 127 @override 128 async def send_bytes(self, data: bytes) -> None: 129 await self.websocket.send(data) 130 131 132 if TYPE_CHECKING: 133 134 class Driver(Mixin, NoneDriver): 135 ... 136 137 else: 138 Driver = combine_driver(NoneDriver, Mixin) 139 """Websockets Driver""" 140 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nonebot/drivers/websockets.py b/nonebot/drivers/websockets.py --- a/nonebot/drivers/websockets.py +++ b/nonebot/drivers/websockets.py @@ -50,10 +50,7 @@ try: return await func(*args, **kwargs) except ConnectionClosed as e: - if e.rcvd_then_sent: - raise WebSocketClosed(e.rcvd.code, e.rcvd.reason) # type: ignore - else: - raise WebSocketClosed(e.sent.code, e.sent.reason) # type: ignore + raise WebSocketClosed(e.code, e.reason) return decorator
{"golden_diff": "diff --git a/nonebot/drivers/websockets.py b/nonebot/drivers/websockets.py\n--- a/nonebot/drivers/websockets.py\n+++ b/nonebot/drivers/websockets.py\n@@ -50,10 +50,7 @@\n try:\n return await func(*args, **kwargs)\n except ConnectionClosed as e:\n- if e.rcvd_then_sent:\n- raise WebSocketClosed(e.rcvd.code, e.rcvd.reason) # type: ignore\n- else:\n- raise WebSocketClosed(e.sent.code, e.sent.reason) # type: ignore\n+ raise WebSocketClosed(e.code, e.reason)\n \n return decorator\n", "issue": "Bug: websockets \u9a71\u52a8\u5668\u8fde\u63a5\u5173\u95ed code \u4e0d\u5b58\u5728\n### \u64cd\u4f5c\u7cfb\u7edf\r\n\r\nWindows\r\n\r\n### Python \u7248\u672c\r\n\r\n3.11.6\r\n\r\n### NoneBot \u7248\u672c\r\n\r\n2.1.2\r\n\r\n### \u9002\u914d\u5668\r\n\r\nnonebot-adapter-kaiheila 0.3.0\r\n\r\n### \u534f\u8bae\u7aef\r\n\r\nkook API(websockets)\r\n\r\n### \u63cf\u8ff0\u95ee\u9898\r\n\r\n\u5728nonebot\u5e93\u7684websockets.py\u6a21\u5757\u4e2d\uff0c\u5904\u7406WebSocket\u5f02\u5e38\u65f6\u51fa\u73b0\u4e86AttributeError\u3002\u8fd9\u4e2a\u95ee\u9898\u53d1\u751f\u5728\u5c1d\u8bd5\u5904\u7406ConnectionClosed\u5f02\u5e38\u7684\u8fc7\u7a0b\u4e2d\u3002\r\n\u5f02\u5e38\u4fe1\u606f:\r\n`AttributeError: 'NoneType' object has no attribute 'code'`\r\n\u76f8\u5173\u4ee3\u7801:\r\n```python\r\ndef catch_closed(func: Callable[P, Awaitable[T]]) -> Callable[P, Awaitable[T]]:\r\n @wraps(func)\r\n async def decorator(*args: P.args, **kwargs: P.kwargs) -> T:\r\n try:\r\n return await func(*args, **kwargs)\r\n except ConnectionClosed as e:\r\n if e.rcvd_then_sent:\r\n raise WebSocketClosed(e.rcvd.code, e.rcvd.reason) # type: ignore\r\n else:\r\n raise WebSocketClosed(e.sent.code, e.sent.reason) # type: ignore\r\n\r\n return decorator\r\n```\r\n\r\n\u4f4d\u4e8e:`nonebot/drivers/websockets.py` Line 56\r\n\u8fd9\u4e2a\u95ee\u9898\u662f\u5728\u6355\u83b7ConnectionClosed\u5f02\u5e38\u65f6\u53d1\u751f\u7684\uff0c\u4f46e.rcvd\u6216e.sent\u5bf9\u8c61\u53ef\u80fd\u4e3aNone(\u5728websocket\u8d85\u65f6\u8fd9\u79cd\u4e0d\u662f\u7531\u5173\u95ed\u5e27\u5f71\u54cd\u7684\u60c5\u51b5\u4e0b\u4f1a\u90fd\u4e0d\u5b58\u5728)\u3002\u8fd9\u5bfc\u81f4\u5c1d\u8bd5\u8bbf\u95eeNoneType\u5bf9\u8c61\u7684code\u5c5e\u6027\uff0c\u4ece\u800c\u5f15\u53d1\u4e86AttributeError\u3002\r\n\r\n### \u590d\u73b0\u6b65\u9aa4\r\n\r\n1.\u5728\u73af\u5883\u4e0b\u52a0\u8f7dwebsockets adapter\r\n2.\u5728\u63d2\u4ef6\u7684event handler\u4e2d\u5b58\u5728\u4e0d\u4f7f\u7528\u5f02\u6b65\u7684\u957f\u65f6\u95f4\u7b49\u5f85\u60c5\u5f62\uff08\u5728\u6211\u7684\u4f8b\u5b50\u662f\u7b49\u5f85\u8bed\u8a00\u6a21\u578b\u7684\u8ba1\u7b97\uff09\r\n\r\n\r\n### \u671f\u671b\u7684\u7ed3\u679c\r\n\r\n\u6309\u7167\u4e0a\u8ff0\u63cf\u8ff0\u4fee\u6539\u6b64bug\r\n\r\n### \u622a\u56fe\u6216\u65e5\u5fd7\r\n\r\n\u65e0\n", "before_files": [{"content": "\"\"\"[websockets](https://websockets.readthedocs.io/) \u9a71\u52a8\u9002\u914d\n\n```bash\nnb driver install websockets\n# \u6216\u8005\npip install nonebot2[websockets]\n```\n\n:::tip \u63d0\u793a\n\u672c\u9a71\u52a8\u4ec5\u652f\u6301\u5ba2\u6237\u7aef WebSocket \u8fde\u63a5\n:::\n\nFrontMatter:\n sidebar_position: 4\n description: nonebot.drivers.websockets \u6a21\u5757\n\"\"\"\n\nimport logging\nfrom functools import wraps\nfrom contextlib import asynccontextmanager\nfrom typing_extensions import ParamSpec, override\nfrom typing import TYPE_CHECKING, Union, TypeVar, Callable, Awaitable, AsyncGenerator\n\nfrom nonebot.drivers import Request\nfrom nonebot.log import LoguruHandler\nfrom nonebot.exception import WebSocketClosed\nfrom 
nonebot.drivers.none import Driver as NoneDriver\nfrom nonebot.drivers import WebSocket as BaseWebSocket\nfrom nonebot.drivers import WebSocketClientMixin, combine_driver\n\ntry:\n from websockets.exceptions import ConnectionClosed\n from websockets.legacy.client import Connect, WebSocketClientProtocol\nexcept ModuleNotFoundError as e: # pragma: no cover\n raise ImportError(\n \"Please install websockets first to use this driver. \"\n \"Install with pip: `pip install nonebot2[websockets]`\"\n ) from e\n\nT = TypeVar(\"T\")\nP = ParamSpec(\"P\")\n\nlogger = logging.Logger(\"websockets.client\", \"INFO\")\nlogger.addHandler(LoguruHandler())\n\n\ndef catch_closed(func: Callable[P, Awaitable[T]]) -> Callable[P, Awaitable[T]]:\n @wraps(func)\n async def decorator(*args: P.args, **kwargs: P.kwargs) -> T:\n try:\n return await func(*args, **kwargs)\n except ConnectionClosed as e:\n if e.rcvd_then_sent:\n raise WebSocketClosed(e.rcvd.code, e.rcvd.reason) # type: ignore\n else:\n raise WebSocketClosed(e.sent.code, e.sent.reason) # type: ignore\n\n return decorator\n\n\nclass Mixin(WebSocketClientMixin):\n \"\"\"Websockets Mixin\"\"\"\n\n @property\n @override\n def type(self) -> str:\n return \"websockets\"\n\n @override\n @asynccontextmanager\n async def websocket(self, setup: Request) -> AsyncGenerator[\"WebSocket\", None]:\n connection = Connect(\n str(setup.url),\n extra_headers={**setup.headers, **setup.cookies.as_header(setup)},\n open_timeout=setup.timeout,\n )\n async with connection as ws:\n yield WebSocket(request=setup, websocket=ws)\n\n\nclass WebSocket(BaseWebSocket):\n \"\"\"Websockets WebSocket Wrapper\"\"\"\n\n @override\n def __init__(self, *, request: Request, websocket: WebSocketClientProtocol):\n super().__init__(request=request)\n self.websocket = websocket\n\n @property\n @override\n def closed(self) -> bool:\n return self.websocket.closed\n\n @override\n async def accept(self):\n raise NotImplementedError\n\n @override\n async def close(self, code: int = 1000, reason: str = \"\"):\n await self.websocket.close(code, reason)\n\n @override\n @catch_closed\n async def receive(self) -> Union[str, bytes]:\n return await self.websocket.recv()\n\n @override\n @catch_closed\n async def receive_text(self) -> str:\n msg = await self.websocket.recv()\n if isinstance(msg, bytes):\n raise TypeError(\"WebSocket received unexpected frame type: bytes\")\n return msg\n\n @override\n @catch_closed\n async def receive_bytes(self) -> bytes:\n msg = await self.websocket.recv()\n if isinstance(msg, str):\n raise TypeError(\"WebSocket received unexpected frame type: str\")\n return msg\n\n @override\n async def send_text(self, data: str) -> None:\n await self.websocket.send(data)\n\n @override\n async def send_bytes(self, data: bytes) -> None:\n await self.websocket.send(data)\n\n\nif TYPE_CHECKING:\n\n class Driver(Mixin, NoneDriver):\n ...\n\nelse:\n Driver = combine_driver(NoneDriver, Mixin)\n \"\"\"Websockets Driver\"\"\"\n", "path": "nonebot/drivers/websockets.py"}], "after_files": [{"content": "\"\"\"[websockets](https://websockets.readthedocs.io/) \u9a71\u52a8\u9002\u914d\n\n```bash\nnb driver install websockets\n# \u6216\u8005\npip install nonebot2[websockets]\n```\n\n:::tip \u63d0\u793a\n\u672c\u9a71\u52a8\u4ec5\u652f\u6301\u5ba2\u6237\u7aef WebSocket \u8fde\u63a5\n:::\n\nFrontMatter:\n sidebar_position: 4\n description: nonebot.drivers.websockets \u6a21\u5757\n\"\"\"\n\nimport logging\nfrom functools import wraps\nfrom contextlib import asynccontextmanager\nfrom typing_extensions import 
ParamSpec, override\nfrom typing import TYPE_CHECKING, Union, TypeVar, Callable, Awaitable, AsyncGenerator\n\nfrom nonebot.drivers import Request\nfrom nonebot.log import LoguruHandler\nfrom nonebot.exception import WebSocketClosed\nfrom nonebot.drivers.none import Driver as NoneDriver\nfrom nonebot.drivers import WebSocket as BaseWebSocket\nfrom nonebot.drivers import WebSocketClientMixin, combine_driver\n\ntry:\n from websockets.exceptions import ConnectionClosed\n from websockets.legacy.client import Connect, WebSocketClientProtocol\nexcept ModuleNotFoundError as e: # pragma: no cover\n raise ImportError(\n \"Please install websockets first to use this driver. \"\n \"Install with pip: `pip install nonebot2[websockets]`\"\n ) from e\n\nT = TypeVar(\"T\")\nP = ParamSpec(\"P\")\n\nlogger = logging.Logger(\"websockets.client\", \"INFO\")\nlogger.addHandler(LoguruHandler())\n\n\ndef catch_closed(func: Callable[P, Awaitable[T]]) -> Callable[P, Awaitable[T]]:\n @wraps(func)\n async def decorator(*args: P.args, **kwargs: P.kwargs) -> T:\n try:\n return await func(*args, **kwargs)\n except ConnectionClosed as e:\n raise WebSocketClosed(e.code, e.reason)\n\n return decorator\n\n\nclass Mixin(WebSocketClientMixin):\n \"\"\"Websockets Mixin\"\"\"\n\n @property\n @override\n def type(self) -> str:\n return \"websockets\"\n\n @override\n @asynccontextmanager\n async def websocket(self, setup: Request) -> AsyncGenerator[\"WebSocket\", None]:\n connection = Connect(\n str(setup.url),\n extra_headers={**setup.headers, **setup.cookies.as_header(setup)},\n open_timeout=setup.timeout,\n )\n async with connection as ws:\n yield WebSocket(request=setup, websocket=ws)\n\n\nclass WebSocket(BaseWebSocket):\n \"\"\"Websockets WebSocket Wrapper\"\"\"\n\n @override\n def __init__(self, *, request: Request, websocket: WebSocketClientProtocol):\n super().__init__(request=request)\n self.websocket = websocket\n\n @property\n @override\n def closed(self) -> bool:\n return self.websocket.closed\n\n @override\n async def accept(self):\n raise NotImplementedError\n\n @override\n async def close(self, code: int = 1000, reason: str = \"\"):\n await self.websocket.close(code, reason)\n\n @override\n @catch_closed\n async def receive(self) -> Union[str, bytes]:\n return await self.websocket.recv()\n\n @override\n @catch_closed\n async def receive_text(self) -> str:\n msg = await self.websocket.recv()\n if isinstance(msg, bytes):\n raise TypeError(\"WebSocket received unexpected frame type: bytes\")\n return msg\n\n @override\n @catch_closed\n async def receive_bytes(self) -> bytes:\n msg = await self.websocket.recv()\n if isinstance(msg, str):\n raise TypeError(\"WebSocket received unexpected frame type: str\")\n return msg\n\n @override\n async def send_text(self, data: str) -> None:\n await self.websocket.send(data)\n\n @override\n async def send_bytes(self, data: bytes) -> None:\n await self.websocket.send(data)\n\n\nif TYPE_CHECKING:\n\n class Driver(Mixin, NoneDriver):\n ...\n\nelse:\n Driver = combine_driver(NoneDriver, Mixin)\n \"\"\"Websockets Driver\"\"\"\n", "path": "nonebot/drivers/websockets.py"}]}
1,869
142
gh_patches_debug_41419
rasdani/github-patches
git_diff
cookiecutter__cookiecutter-815
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- passing list to template I would like to be able to pass a list to the templates. See the two code blocks at the end of this post for an example. Right now it appears that if you pass a list in the config object, it's read as a list of options for that key. I know that you can use `str.split()` in the jinja2 template, but that's not a real solution, since it's impossible to "escape" the character that's used as the delimiter. What's the best solution here? I would prefer to be able to pass a list in the json object and call it a day, but obviously that doesn't work for the user input prompts. - `cookiecutter.json`: ``` json { "build_steps": [ "do_something", "do_something_else" ] } ``` - `Dockerfile`: ``` jinja2 FROM something {% for step in cookiecutter.build_steps %} RUN {{ step }} {% endfor %} ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cookiecutter/prompt.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 """ 4 cookiecutter.prompt 5 --------------------- 6 7 Functions for prompting the user for project info. 8 """ 9 10 from collections import OrderedDict 11 12 import click 13 from past.builtins import basestring 14 15 from future.utils import iteritems 16 17 from jinja2.exceptions import UndefinedError 18 19 from .exceptions import UndefinedVariableInTemplate 20 from .environment import StrictEnvironment 21 22 23 def read_user_variable(var_name, default_value): 24 """Prompt the user for the given variable and return the entered value 25 or the given default. 26 27 :param str var_name: Variable of the context to query the user 28 :param default_value: Value that will be returned if no input happens 29 """ 30 # Please see http://click.pocoo.org/4/api/#click.prompt 31 return click.prompt(var_name, default=default_value) 32 33 34 def read_user_yes_no(question, default_value): 35 """Prompt the user to reply with 'yes' or 'no' (or equivalent values). 36 37 Note: 38 Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n' 39 40 :param str question: Question to the user 41 :param default_value: Value that will be returned if no input happens 42 """ 43 # Please see http://click.pocoo.org/4/api/#click.prompt 44 return click.prompt( 45 question, 46 default=default_value, 47 type=click.BOOL 48 ) 49 50 51 def read_user_choice(var_name, options): 52 """Prompt the user to choose from several options for the given variable. 53 54 The first item will be returned if no input happens. 
55 56 :param str var_name: Variable as specified in the context 57 :param list options: Sequence of options that are available to select from 58 :return: Exactly one item of ``options`` that has been chosen by the user 59 """ 60 # Please see http://click.pocoo.org/4/api/#click.prompt 61 if not isinstance(options, list): 62 raise TypeError 63 64 if not options: 65 raise ValueError 66 67 choice_map = OrderedDict( 68 (u'{}'.format(i), value) for i, value in enumerate(options, 1) 69 ) 70 choices = choice_map.keys() 71 default = u'1' 72 73 choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()] 74 prompt = u'\n'.join(( 75 u'Select {}:'.format(var_name), 76 u'\n'.join(choice_lines), 77 u'Choose from {}'.format(u', '.join(choices)) 78 )) 79 80 user_choice = click.prompt( 81 prompt, type=click.Choice(choices), default=default 82 ) 83 return choice_map[user_choice] 84 85 86 def render_variable(env, raw, cookiecutter_dict): 87 if raw is None: 88 return None 89 if not isinstance(raw, basestring): 90 raw = str(raw) 91 template = env.from_string(raw) 92 93 rendered_template = template.render(cookiecutter=cookiecutter_dict) 94 return rendered_template 95 96 97 def prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input): 98 """Prompt the user which option to choose from the given. Each of the 99 possible choices is rendered beforehand. 100 """ 101 rendered_options = [ 102 render_variable(env, raw, cookiecutter_dict) for raw in options 103 ] 104 105 if no_input: 106 return rendered_options[0] 107 return read_user_choice(key, rendered_options) 108 109 110 def prompt_for_config(context, no_input=False): 111 """ 112 Prompts the user to enter new config, using context as a source for the 113 field names and sample values. 114 115 :param no_input: Prompt the user at command line for manual configuration? 116 """ 117 cookiecutter_dict = {} 118 env = StrictEnvironment(context=context) 119 120 for key, raw in iteritems(context[u'cookiecutter']): 121 if key.startswith(u'_'): 122 cookiecutter_dict[key] = raw 123 continue 124 125 try: 126 if isinstance(raw, list): 127 # We are dealing with a choice variable 128 val = prompt_choice_for_config( 129 cookiecutter_dict, env, key, raw, no_input 130 ) 131 else: 132 # We are dealing with a regular variable 133 val = render_variable(env, raw, cookiecutter_dict) 134 135 if not no_input: 136 val = read_user_variable(key, val) 137 except UndefinedError as err: 138 msg = "Unable to render variable '{}'".format(key) 139 raise UndefinedVariableInTemplate(msg, err, context) 140 141 cookiecutter_dict[key] = val 142 return cookiecutter_dict 143 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py --- a/cookiecutter/prompt.py +++ b/cookiecutter/prompt.py @@ -8,6 +8,7 @@ """ from collections import OrderedDict +import json import click from past.builtins import basestring @@ -83,11 +84,43 @@ return choice_map[user_choice] +def read_user_dict(var_name, default_value): + """Prompt the user to provide a dictionary of data. + + :param str var_name: Variable as specified in the context + :param default_value: Value that will be returned if no input is provided + :return: A Python dictionary to use in the context. + """ + # Please see http://click.pocoo.org/4/api/#click.prompt + if not isinstance(default_value, dict): + raise TypeError + + raw = click.prompt(var_name, default='default') + if raw != 'default': + value = json.loads(raw, object_hook=OrderedDict) + else: + value = default_value + + return value + + def render_variable(env, raw, cookiecutter_dict): if raw is None: return None - if not isinstance(raw, basestring): + elif isinstance(raw, dict): + return { + render_variable(env, k, cookiecutter_dict): + render_variable(env, v, cookiecutter_dict) + for k, v in raw.items() + } + elif isinstance(raw, list): + return [ + render_variable(env, v, cookiecutter_dict) + for v in raw + ] + elif not isinstance(raw, basestring): raw = str(raw) + template = env.from_string(raw) rendered_template = template.render(cookiecutter=cookiecutter_dict) @@ -117,6 +150,9 @@ cookiecutter_dict = {} env = StrictEnvironment(context=context) + # First pass: Handle simple and raw variables, plus choices. + # These must be done first because the dictionaries keys and + # values might refer to them. for key, raw in iteritems(context[u'cookiecutter']): if key.startswith(u'_'): cookiecutter_dict[key] = raw @@ -128,15 +164,33 @@ val = prompt_choice_for_config( cookiecutter_dict, env, key, raw, no_input ) - else: + cookiecutter_dict[key] = val + elif not isinstance(raw, dict): # We are dealing with a regular variable val = render_variable(env, raw, cookiecutter_dict) if not no_input: val = read_user_variable(key, val) + + cookiecutter_dict[key] = val + except UndefinedError as err: + msg = "Unable to render variable '{}'".format(key) + raise UndefinedVariableInTemplate(msg, err, context) + + # Second pass; handle the dictionaries. + for key, raw in iteritems(context[u'cookiecutter']): + + try: + if isinstance(raw, dict): + # We are dealing with a dict variable + val = render_variable(env, raw, cookiecutter_dict) + + if not no_input: + val = read_user_dict(key, val) + + cookiecutter_dict[key] = val except UndefinedError as err: msg = "Unable to render variable '{}'".format(key) raise UndefinedVariableInTemplate(msg, err, context) - cookiecutter_dict[key] = val return cookiecutter_dict
{"golden_diff": "diff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py\n--- a/cookiecutter/prompt.py\n+++ b/cookiecutter/prompt.py\n@@ -8,6 +8,7 @@\n \"\"\"\n \n from collections import OrderedDict\n+import json\n \n import click\n from past.builtins import basestring\n@@ -83,11 +84,43 @@\n return choice_map[user_choice]\n \n \n+def read_user_dict(var_name, default_value):\n+ \"\"\"Prompt the user to provide a dictionary of data.\n+\n+ :param str var_name: Variable as specified in the context\n+ :param default_value: Value that will be returned if no input is provided\n+ :return: A Python dictionary to use in the context.\n+ \"\"\"\n+ # Please see http://click.pocoo.org/4/api/#click.prompt\n+ if not isinstance(default_value, dict):\n+ raise TypeError\n+\n+ raw = click.prompt(var_name, default='default')\n+ if raw != 'default':\n+ value = json.loads(raw, object_hook=OrderedDict)\n+ else:\n+ value = default_value\n+\n+ return value\n+\n+\n def render_variable(env, raw, cookiecutter_dict):\n if raw is None:\n return None\n- if not isinstance(raw, basestring):\n+ elif isinstance(raw, dict):\n+ return {\n+ render_variable(env, k, cookiecutter_dict):\n+ render_variable(env, v, cookiecutter_dict)\n+ for k, v in raw.items()\n+ }\n+ elif isinstance(raw, list):\n+ return [\n+ render_variable(env, v, cookiecutter_dict)\n+ for v in raw\n+ ]\n+ elif not isinstance(raw, basestring):\n raw = str(raw)\n+\n template = env.from_string(raw)\n \n rendered_template = template.render(cookiecutter=cookiecutter_dict)\n@@ -117,6 +150,9 @@\n cookiecutter_dict = {}\n env = StrictEnvironment(context=context)\n \n+ # First pass: Handle simple and raw variables, plus choices.\n+ # These must be done first because the dictionaries keys and\n+ # values might refer to them.\n for key, raw in iteritems(context[u'cookiecutter']):\n if key.startswith(u'_'):\n cookiecutter_dict[key] = raw\n@@ -128,15 +164,33 @@\n val = prompt_choice_for_config(\n cookiecutter_dict, env, key, raw, no_input\n )\n- else:\n+ cookiecutter_dict[key] = val\n+ elif not isinstance(raw, dict):\n # We are dealing with a regular variable\n val = render_variable(env, raw, cookiecutter_dict)\n \n if not no_input:\n val = read_user_variable(key, val)\n+\n+ cookiecutter_dict[key] = val\n+ except UndefinedError as err:\n+ msg = \"Unable to render variable '{}'\".format(key)\n+ raise UndefinedVariableInTemplate(msg, err, context)\n+\n+ # Second pass; handle the dictionaries.\n+ for key, raw in iteritems(context[u'cookiecutter']):\n+\n+ try:\n+ if isinstance(raw, dict):\n+ # We are dealing with a dict variable\n+ val = render_variable(env, raw, cookiecutter_dict)\n+\n+ if not no_input:\n+ val = read_user_dict(key, val)\n+\n+ cookiecutter_dict[key] = val\n except UndefinedError as err:\n msg = \"Unable to render variable '{}'\".format(key)\n raise UndefinedVariableInTemplate(msg, err, context)\n \n- cookiecutter_dict[key] = val\n return cookiecutter_dict\n", "issue": "passing list to template\nI would like to be able to pass a list to the templates. See the two code blocks at the end of this post for an example. Right now it appears that if you pass a list in the config object, it's read as a list of options for that key.\n\nI know that you can use `str.split()` in the jinja2 template, but that's not a real solution, since it's impossible to \"escape\" the character that's used as the delimiter. What's the best solution here? 
I would prefer to be able to pass a list in the json object and call it a day, but obviously that doesn't work for the user input prompts.\n- `cookiecutter.json`:\n\n``` json\n{\n \"build_steps\": [\n \"do_something\",\n \"do_something_else\"\n ]\n}\n```\n- `Dockerfile`:\n\n``` jinja2\nFROM something\n\n{% for step in cookiecutter.build_steps %}\nRUN {{ step }}\n{% endfor %}\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.prompt\n---------------------\n\nFunctions for prompting the user for project info.\n\"\"\"\n\nfrom collections import OrderedDict\n\nimport click\nfrom past.builtins import basestring\n\nfrom future.utils import iteritems\n\nfrom jinja2.exceptions import UndefinedError\n\nfrom .exceptions import UndefinedVariableInTemplate\nfrom .environment import StrictEnvironment\n\n\ndef read_user_variable(var_name, default_value):\n \"\"\"Prompt the user for the given variable and return the entered value\n or the given default.\n\n :param str var_name: Variable of the context to query the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(var_name, default=default_value)\n\n\ndef read_user_yes_no(question, default_value):\n \"\"\"Prompt the user to reply with 'yes' or 'no' (or equivalent values).\n\n Note:\n Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'\n\n :param str question: Question to the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(\n question,\n default=default_value,\n type=click.BOOL\n )\n\n\ndef read_user_choice(var_name, options):\n \"\"\"Prompt the user to choose from several options for the given variable.\n\n The first item will be returned if no input happens.\n\n :param str var_name: Variable as specified in the context\n :param list options: Sequence of options that are available to select from\n :return: Exactly one item of ``options`` that has been chosen by the user\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n if not isinstance(options, list):\n raise TypeError\n\n if not options:\n raise ValueError\n\n choice_map = OrderedDict(\n (u'{}'.format(i), value) for i, value in enumerate(options, 1)\n )\n choices = choice_map.keys()\n default = u'1'\n\n choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()]\n prompt = u'\\n'.join((\n u'Select {}:'.format(var_name),\n u'\\n'.join(choice_lines),\n u'Choose from {}'.format(u', '.join(choices))\n ))\n\n user_choice = click.prompt(\n prompt, type=click.Choice(choices), default=default\n )\n return choice_map[user_choice]\n\n\ndef render_variable(env, raw, cookiecutter_dict):\n if raw is None:\n return None\n if not isinstance(raw, basestring):\n raw = str(raw)\n template = env.from_string(raw)\n\n rendered_template = template.render(cookiecutter=cookiecutter_dict)\n return rendered_template\n\n\ndef prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):\n \"\"\"Prompt the user which option to choose from the given. 
Each of the\n possible choices is rendered beforehand.\n \"\"\"\n rendered_options = [\n render_variable(env, raw, cookiecutter_dict) for raw in options\n ]\n\n if no_input:\n return rendered_options[0]\n return read_user_choice(key, rendered_options)\n\n\ndef prompt_for_config(context, no_input=False):\n \"\"\"\n Prompts the user to enter new config, using context as a source for the\n field names and sample values.\n\n :param no_input: Prompt the user at command line for manual configuration?\n \"\"\"\n cookiecutter_dict = {}\n env = StrictEnvironment(context=context)\n\n for key, raw in iteritems(context[u'cookiecutter']):\n if key.startswith(u'_'):\n cookiecutter_dict[key] = raw\n continue\n\n try:\n if isinstance(raw, list):\n # We are dealing with a choice variable\n val = prompt_choice_for_config(\n cookiecutter_dict, env, key, raw, no_input\n )\n else:\n # We are dealing with a regular variable\n val = render_variable(env, raw, cookiecutter_dict)\n\n if not no_input:\n val = read_user_variable(key, val)\n except UndefinedError as err:\n msg = \"Unable to render variable '{}'\".format(key)\n raise UndefinedVariableInTemplate(msg, err, context)\n\n cookiecutter_dict[key] = val\n return cookiecutter_dict\n", "path": "cookiecutter/prompt.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.prompt\n---------------------\n\nFunctions for prompting the user for project info.\n\"\"\"\n\nfrom collections import OrderedDict\nimport json\n\nimport click\nfrom past.builtins import basestring\n\nfrom future.utils import iteritems\n\nfrom jinja2.exceptions import UndefinedError\n\nfrom .exceptions import UndefinedVariableInTemplate\nfrom .environment import StrictEnvironment\n\n\ndef read_user_variable(var_name, default_value):\n \"\"\"Prompt the user for the given variable and return the entered value\n or the given default.\n\n :param str var_name: Variable of the context to query the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(var_name, default=default_value)\n\n\ndef read_user_yes_no(question, default_value):\n \"\"\"Prompt the user to reply with 'yes' or 'no' (or equivalent values).\n\n Note:\n Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'\n\n :param str question: Question to the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(\n question,\n default=default_value,\n type=click.BOOL\n )\n\n\ndef read_user_choice(var_name, options):\n \"\"\"Prompt the user to choose from several options for the given variable.\n\n The first item will be returned if no input happens.\n\n :param str var_name: Variable as specified in the context\n :param list options: Sequence of options that are available to select from\n :return: Exactly one item of ``options`` that has been chosen by the user\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n if not isinstance(options, list):\n raise TypeError\n\n if not options:\n raise ValueError\n\n choice_map = OrderedDict(\n (u'{}'.format(i), value) for i, value in enumerate(options, 1)\n )\n choices = choice_map.keys()\n default = u'1'\n\n choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()]\n prompt = u'\\n'.join((\n u'Select {}:'.format(var_name),\n u'\\n'.join(choice_lines),\n u'Choose from {}'.format(u', '.join(choices))\n ))\n\n user_choice 
= click.prompt(\n prompt, type=click.Choice(choices), default=default\n )\n return choice_map[user_choice]\n\n\ndef read_user_dict(var_name, default_value):\n \"\"\"Prompt the user to provide a dictionary of data.\n\n :param str var_name: Variable as specified in the context\n :param default_value: Value that will be returned if no input is provided\n :return: A Python dictionary to use in the context.\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n if not isinstance(default_value, dict):\n raise TypeError\n\n raw = click.prompt(var_name, default='default')\n if raw != 'default':\n value = json.loads(raw, object_hook=OrderedDict)\n else:\n value = default_value\n\n return value\n\n\ndef render_variable(env, raw, cookiecutter_dict):\n if raw is None:\n return None\n elif isinstance(raw, dict):\n return {\n render_variable(env, k, cookiecutter_dict):\n render_variable(env, v, cookiecutter_dict)\n for k, v in raw.items()\n }\n elif isinstance(raw, list):\n return [\n render_variable(env, v, cookiecutter_dict)\n for v in raw\n ]\n elif not isinstance(raw, basestring):\n raw = str(raw)\n\n template = env.from_string(raw)\n\n rendered_template = template.render(cookiecutter=cookiecutter_dict)\n return rendered_template\n\n\ndef prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):\n \"\"\"Prompt the user which option to choose from the given. Each of the\n possible choices is rendered beforehand.\n \"\"\"\n rendered_options = [\n render_variable(env, raw, cookiecutter_dict) for raw in options\n ]\n\n if no_input:\n return rendered_options[0]\n return read_user_choice(key, rendered_options)\n\n\ndef prompt_for_config(context, no_input=False):\n \"\"\"\n Prompts the user to enter new config, using context as a source for the\n field names and sample values.\n\n :param no_input: Prompt the user at command line for manual configuration?\n \"\"\"\n cookiecutter_dict = {}\n env = StrictEnvironment(context=context)\n\n # First pass: Handle simple and raw variables, plus choices.\n # These must be done first because the dictionaries keys and\n # values might refer to them.\n for key, raw in iteritems(context[u'cookiecutter']):\n if key.startswith(u'_'):\n cookiecutter_dict[key] = raw\n continue\n\n try:\n if isinstance(raw, list):\n # We are dealing with a choice variable\n val = prompt_choice_for_config(\n cookiecutter_dict, env, key, raw, no_input\n )\n cookiecutter_dict[key] = val\n elif not isinstance(raw, dict):\n # We are dealing with a regular variable\n val = render_variable(env, raw, cookiecutter_dict)\n\n if not no_input:\n val = read_user_variable(key, val)\n\n cookiecutter_dict[key] = val\n except UndefinedError as err:\n msg = \"Unable to render variable '{}'\".format(key)\n raise UndefinedVariableInTemplate(msg, err, context)\n\n # Second pass; handle the dictionaries.\n for key, raw in iteritems(context[u'cookiecutter']):\n\n try:\n if isinstance(raw, dict):\n # We are dealing with a dict variable\n val = render_variable(env, raw, cookiecutter_dict)\n\n if not no_input:\n val = read_user_dict(key, val)\n\n cookiecutter_dict[key] = val\n except UndefinedError as err:\n msg = \"Unable to render variable '{}'\".format(key)\n raise UndefinedVariableInTemplate(msg, err, context)\n\n return cookiecutter_dict\n", "path": "cookiecutter/prompt.py"}]}
1,790
819
gh_patches_debug_8880
rasdani/github-patches
git_diff
liqd__a4-product-606
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- can't see full time when creating an event on small screen ![bildschirmfoto 2018-07-13 um 10 57 54](https://user-images.githubusercontent.com/41073836/42682788-d0101a54-868b-11e8-816e-e77dc5f8a60e.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `liqd_product/apps/actions/apps.py` Content: ``` 1 from django.apps import AppConfig 2 3 4 class Config(AppConfig): 5 name = 'liqd_product.apps.actions' 6 label = 'liqd_product_actions' 7 8 def ready(self): 9 from adhocracy4.actions.models import configure_icon 10 from adhocracy4.actions.models import configure_type 11 from adhocracy4.actions.verbs import Verbs 12 configure_type( 13 'project', 14 ('a4projects', 'project') 15 ) 16 configure_type( 17 'phase', 18 ('a4phases', 'phase') 19 ) 20 configure_type( 21 'comment', 22 ('a4comments', 'comment') 23 ) 24 configure_type( 25 'rating', 26 ('a4ratings', 'rating') 27 ) 28 configure_type( 29 'item', 30 ('liqd_product_budgeting', 'proposal'), 31 ('liqd_product_ideas', 'idea'), 32 ('liqd_product_mapideas', 'mapidea') 33 ) 34 35 configure_icon('far fa-comment', type='comment') 36 configure_icon('far fa-lightbulb', type='item') 37 configure_icon('fas fa-plus', verb=Verbs.ADD) 38 configure_icon('fas fa-pencil-alt', verb=Verbs.UPDATE) 39 configure_icon('fas fa-flag', verb=Verbs.START) 40 configure_icon('far fa-clock', verb=Verbs.SCHEDULE) 41 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/liqd_product/apps/actions/apps.py b/liqd_product/apps/actions/apps.py --- a/liqd_product/apps/actions/apps.py +++ b/liqd_product/apps/actions/apps.py @@ -35,6 +35,6 @@ configure_icon('far fa-comment', type='comment') configure_icon('far fa-lightbulb', type='item') configure_icon('fas fa-plus', verb=Verbs.ADD) - configure_icon('fas fa-pencil-alt', verb=Verbs.UPDATE) + configure_icon('fas fa-pencil', verb=Verbs.UPDATE) configure_icon('fas fa-flag', verb=Verbs.START) - configure_icon('far fa-clock', verb=Verbs.SCHEDULE) + configure_icon('far fa-clock-o', verb=Verbs.SCHEDULE)
{"golden_diff": "diff --git a/liqd_product/apps/actions/apps.py b/liqd_product/apps/actions/apps.py\n--- a/liqd_product/apps/actions/apps.py\n+++ b/liqd_product/apps/actions/apps.py\n@@ -35,6 +35,6 @@\n configure_icon('far fa-comment', type='comment')\n configure_icon('far fa-lightbulb', type='item')\n configure_icon('fas fa-plus', verb=Verbs.ADD)\n- configure_icon('fas fa-pencil-alt', verb=Verbs.UPDATE)\n+ configure_icon('fas fa-pencil', verb=Verbs.UPDATE)\n configure_icon('fas fa-flag', verb=Verbs.START)\n- configure_icon('far fa-clock', verb=Verbs.SCHEDULE)\n+ configure_icon('far fa-clock-o', verb=Verbs.SCHEDULE)\n", "issue": "can't see full time when creating an event on small screen\n![bildschirmfoto 2018-07-13 um 10 57 54](https://user-images.githubusercontent.com/41073836/42682788-d0101a54-868b-11e8-816e-e77dc5f8a60e.png)\r\n\n", "before_files": [{"content": "from django.apps import AppConfig\n\n\nclass Config(AppConfig):\n name = 'liqd_product.apps.actions'\n label = 'liqd_product_actions'\n\n def ready(self):\n from adhocracy4.actions.models import configure_icon\n from adhocracy4.actions.models import configure_type\n from adhocracy4.actions.verbs import Verbs\n configure_type(\n 'project',\n ('a4projects', 'project')\n )\n configure_type(\n 'phase',\n ('a4phases', 'phase')\n )\n configure_type(\n 'comment',\n ('a4comments', 'comment')\n )\n configure_type(\n 'rating',\n ('a4ratings', 'rating')\n )\n configure_type(\n 'item',\n ('liqd_product_budgeting', 'proposal'),\n ('liqd_product_ideas', 'idea'),\n ('liqd_product_mapideas', 'mapidea')\n )\n\n configure_icon('far fa-comment', type='comment')\n configure_icon('far fa-lightbulb', type='item')\n configure_icon('fas fa-plus', verb=Verbs.ADD)\n configure_icon('fas fa-pencil-alt', verb=Verbs.UPDATE)\n configure_icon('fas fa-flag', verb=Verbs.START)\n configure_icon('far fa-clock', verb=Verbs.SCHEDULE)\n", "path": "liqd_product/apps/actions/apps.py"}], "after_files": [{"content": "from django.apps import AppConfig\n\n\nclass Config(AppConfig):\n name = 'liqd_product.apps.actions'\n label = 'liqd_product_actions'\n\n def ready(self):\n from adhocracy4.actions.models import configure_icon\n from adhocracy4.actions.models import configure_type\n from adhocracy4.actions.verbs import Verbs\n configure_type(\n 'project',\n ('a4projects', 'project')\n )\n configure_type(\n 'phase',\n ('a4phases', 'phase')\n )\n configure_type(\n 'comment',\n ('a4comments', 'comment')\n )\n configure_type(\n 'rating',\n ('a4ratings', 'rating')\n )\n configure_type(\n 'item',\n ('liqd_product_budgeting', 'proposal'),\n ('liqd_product_ideas', 'idea'),\n ('liqd_product_mapideas', 'mapidea')\n )\n\n configure_icon('far fa-comment', type='comment')\n configure_icon('far fa-lightbulb', type='item')\n configure_icon('fas fa-plus', verb=Verbs.ADD)\n configure_icon('fas fa-pencil', verb=Verbs.UPDATE)\n configure_icon('fas fa-flag', verb=Verbs.START)\n configure_icon('far fa-clock-o', verb=Verbs.SCHEDULE)\n", "path": "liqd_product/apps/actions/apps.py"}]}
717
170
gh_patches_debug_35338
rasdani/github-patches
git_diff
joke2k__faker-270
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- some generated UK postcodes are invalid UK postcodes follow strict rules and there are a limited set of codes for each part of the postcode. Faker does not know about these rules and generates postcodes such as: `XC9E 1FL` and `U93 2ZU` which are invalid. See e.g. https://github.com/hamstah/ukpostcodeparser for more info. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `faker/providers/address/en_GB/__init__.py` Content: ``` 1 from __future__ import unicode_literals 2 from ..en import Provider as AddressProvider 3 4 5 class Provider(AddressProvider): 6 city_prefixes = ('North', 'East', 'West', 'South', 'New', 'Lake', 'Port') 7 city_suffixes = ( 8 'town', 'ton', 'land', 'ville', 'berg', 'burgh', 'borough', 'bury', 'view', 'port', 'mouth', 'stad', 'furt', 9 'chester', 'mouth', 'fort', 'haven', 'side', 'shire') 10 building_number_formats = ('#', '##', '###') 11 street_suffixes = ( 12 'alley', 'avenue', 'branch', 'bridge', 'brook', 'brooks', 'burg', 'burgs', 'bypass', 'camp', 'canyon', 'cape', 13 'causeway', 'center', 'centers', 'circle', 'circles', 'cliff', 'cliffs', 'club', 'common', 'corner', 'corners', 14 'course', 'court', 'courts', 'cove', 'coves', 'creek', 'crescent', 'crest', 'crossing', 'crossroad', 'curve', 15 'dale', 'dam', 'divide', 'drive', 'drive', 'drives', 'estate', 'estates', 'expressway', 'extension', 16 'extensions', 17 'fall', 'falls', 'ferry', 'field', 'fields', 'flat', 'flats', 'ford', 'fords', 'forest', 'forge', 'forges', 18 'fork', 19 'forks', 'fort', 'freeway', 'garden', 'gardens', 'gateway', 'glen', 'glens', 'green', 'greens', 'grove', 20 'groves', 21 'harbor', 'harbors', 'haven', 'heights', 'highway', 'hill', 'hills', 'hollow', 'inlet', 'inlet', 'island', 22 'island', 23 'islands', 'islands', 'isle', 'isle', 'junction', 'junctions', 'key', 'keys', 'knoll', 'knolls', 'lake', 24 'lakes', 25 'land', 'landing', 'lane', 'light', 'lights', 'loaf', 'lock', 'locks', 'locks', 'lodge', 'lodge', 'loop', 26 'mall', 27 'manor', 'manors', 'meadow', 'meadows', 'mews', 'mill', 'mills', 'mission', 'mission', 'motorway', 'mount', 28 'mountain', 'mountain', 'mountains', 'mountains', 'neck', 'orchard', 'oval', 'overpass', 'park', 'parks', 29 'parkway', 30 'parkways', 'pass', 'passage', 'path', 'pike', 'pine', 'pines', 'place', 'plain', 'plains', 'plains', 'plaza', 31 'plaza', 'point', 'points', 'port', 'port', 'ports', 'ports', 'prairie', 'prairie', 'radial', 'ramp', 'ranch', 32 'rapid', 'rapids', 'rest', 'ridge', 'ridges', 'river', 'road', 'road', 'roads', 'roads', 'route', 'row', 'rue', 33 'run', 'shoal', 'shoals', 'shore', 'shores', 'skyway', 'spring', 'springs', 'springs', 'spur', 'spurs', 34 'square', 35 'square', 'squares', 'squares', 'station', 'station', 'stravenue', 'stravenue', 'stream', 'stream', 'street', 36 'street', 'streets', 'summit', 'summit', 'terrace', 'throughway', 'trace', 'track', 'trafficway', 'trail', 37 'trail', 38 'tunnel', 'tunnel', 'turnpike', 'turnpike', 'underpass', 'union', 'unions', 'valley', 'valleys', 'via', 39 'viaduct', 40 'view', 'views', 'village', 'village', 'villages', 'ville', 'vista', 'vista', 'walk', 'walks', 'wall', 'way', 41 'ways', 'well', 'wells') 42 43 postcode_formats = ('??#? #??', '?#? 
#??', '?# #??', '?## #??', '??# #??', '??## #??',) 44 45 city_formats = ( 46 '{{city_prefix}} {{first_name}}{{city_suffix}}', 47 '{{city_prefix}} {{first_name}}', 48 '{{first_name}}{{city_suffix}}', 49 '{{last_name}}{{city_suffix}}', 50 ) 51 street_name_formats = ( 52 '{{first_name}} {{street_suffix}}', 53 '{{last_name}} {{street_suffix}}' 54 ) 55 street_address_formats = ( 56 '{{building_number}} {{street_name}}', 57 '{{secondary_address}}\n{{street_name}}', 58 ) 59 address_formats = ( 60 "{{street_address}}\n{{city}}\n{{postcode}}", 61 ) 62 secondary_address_formats = ('Flat #', 'Flat ##', 'Flat ##?', 'Studio #', 'Studio ##', 'Studio ##?') 63 64 @classmethod 65 def city_prefix(cls): 66 return cls.random_element(cls.city_prefixes) 67 68 @classmethod 69 def secondary_address(cls): 70 return cls.bothify(cls.random_element(cls.secondary_address_formats)) 71 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/faker/providers/address/en_GB/__init__.py b/faker/providers/address/en_GB/__init__.py --- a/faker/providers/address/en_GB/__init__.py +++ b/faker/providers/address/en_GB/__init__.py @@ -40,7 +40,44 @@ 'view', 'views', 'village', 'village', 'villages', 'ville', 'vista', 'vista', 'walk', 'walks', 'wall', 'way', 'ways', 'well', 'wells') - postcode_formats = ('??#? #??', '?#? #??', '?# #??', '?## #??', '??# #??', '??## #??',) + POSTAL_ZONES = ( + 'AB', 'AL', 'B' , 'BA', 'BB', 'BD', 'BH', 'BL', 'BN', 'BR', + 'BS', 'BT', 'CA', 'CB', 'CF', 'CH', 'CM', 'CO', 'CR', 'CT', + 'CV', 'CW', 'DA', 'DD', 'DE', 'DG', 'DH', 'DL', 'DN', 'DT', + 'DY', 'E' , 'EC', 'EH', 'EN', 'EX', 'FK', 'FY', 'G' , 'GL', + 'GY', 'GU', 'HA', 'HD', 'HG', 'HP', 'HR', 'HS', 'HU', 'HX', + 'IG', 'IM', 'IP', 'IV', 'JE', 'KA', 'KT', 'KW', 'KY', 'L' , + 'LA', 'LD', 'LE', 'LL', 'LN', 'LS', 'LU', 'M' , 'ME', 'MK', + 'ML', 'N' , 'NE', 'NG', 'NN', 'NP', 'NR', 'NW', 'OL', 'OX', + 'PA', 'PE', 'PH', 'PL', 'PO', 'PR', 'RG', 'RH', 'RM', 'S' , + 'SA', 'SE', 'SG', 'SK', 'SL', 'SM', 'SN', 'SO', 'SP', 'SR', + 'SS', 'ST', 'SW', 'SY', 'TA', 'TD', 'TF', 'TN', 'TQ', 'TR', + 'TS', 'TW', 'UB', 'W' , 'WA', 'WC', 'WD', 'WF', 'WN', 'WR', + 'WS', 'WV', 'YO', 'ZE' + ) + + POSTAL_ZONES_ONE_CHAR = [zone for zone in POSTAL_ZONES if len(zone) == 1] + POSTAL_ZONES_TWO_CHARS = [zone for zone in POSTAL_ZONES if len(zone) == 2] + + postcode_formats = ( + 'AN NEE', + 'ANN NEE', + 'PN NEE', + 'PNN NEE', + 'ANC NEE', + 'PND NEE', + ) + + _postcode_sets = { + ' ': ' ', + 'N': [str(i) for i in range(0, 10)], + 'A': POSTAL_ZONES_ONE_CHAR, + 'B': 'ABCDEFGHKLMNOPQRSTUVWXY', + 'C': 'ABCDEFGHJKSTUW', + 'D': 'ABEHMNPRVWXY', + 'E': 'ABDEFGHJLNPQRSTUWXYZ', + 'P': POSTAL_ZONES_TWO_CHARS, + } city_formats = ( '{{city_prefix}} {{first_name}}{{city_suffix}}', @@ -61,6 +98,17 @@ ) secondary_address_formats = ('Flat #', 'Flat ##', 'Flat ##?', 'Studio #', 'Studio ##', 'Studio ##?') + @classmethod + def postcode(cls): + """ + See http://web.archive.org/web/20090930140939/http://www.govtalk.gov.uk/gdsc/html/noframes/PostCode-2-1-Release.htm + """ + postcode = '' + pattern = cls.random_element(cls.postcode_formats) + for placeholder in pattern: + postcode += cls.random_element(cls._postcode_sets[placeholder]) + return postcode + @classmethod def city_prefix(cls): return cls.random_element(cls.city_prefixes)
{"golden_diff": "diff --git a/faker/providers/address/en_GB/__init__.py b/faker/providers/address/en_GB/__init__.py\n--- a/faker/providers/address/en_GB/__init__.py\n+++ b/faker/providers/address/en_GB/__init__.py\n@@ -40,7 +40,44 @@\n 'view', 'views', 'village', 'village', 'villages', 'ville', 'vista', 'vista', 'walk', 'walks', 'wall', 'way',\n 'ways', 'well', 'wells')\n \n- postcode_formats = ('??#? #??', '?#? #??', '?# #??', '?## #??', '??# #??', '??## #??',)\n+ POSTAL_ZONES = (\n+ 'AB', 'AL', 'B' , 'BA', 'BB', 'BD', 'BH', 'BL', 'BN', 'BR',\n+ 'BS', 'BT', 'CA', 'CB', 'CF', 'CH', 'CM', 'CO', 'CR', 'CT',\n+ 'CV', 'CW', 'DA', 'DD', 'DE', 'DG', 'DH', 'DL', 'DN', 'DT',\n+ 'DY', 'E' , 'EC', 'EH', 'EN', 'EX', 'FK', 'FY', 'G' , 'GL',\n+ 'GY', 'GU', 'HA', 'HD', 'HG', 'HP', 'HR', 'HS', 'HU', 'HX',\n+ 'IG', 'IM', 'IP', 'IV', 'JE', 'KA', 'KT', 'KW', 'KY', 'L' ,\n+ 'LA', 'LD', 'LE', 'LL', 'LN', 'LS', 'LU', 'M' , 'ME', 'MK',\n+ 'ML', 'N' , 'NE', 'NG', 'NN', 'NP', 'NR', 'NW', 'OL', 'OX',\n+ 'PA', 'PE', 'PH', 'PL', 'PO', 'PR', 'RG', 'RH', 'RM', 'S' ,\n+ 'SA', 'SE', 'SG', 'SK', 'SL', 'SM', 'SN', 'SO', 'SP', 'SR',\n+ 'SS', 'ST', 'SW', 'SY', 'TA', 'TD', 'TF', 'TN', 'TQ', 'TR',\n+ 'TS', 'TW', 'UB', 'W' , 'WA', 'WC', 'WD', 'WF', 'WN', 'WR',\n+ 'WS', 'WV', 'YO', 'ZE'\n+ )\n+\n+ POSTAL_ZONES_ONE_CHAR = [zone for zone in POSTAL_ZONES if len(zone) == 1]\n+ POSTAL_ZONES_TWO_CHARS = [zone for zone in POSTAL_ZONES if len(zone) == 2]\n+\n+ postcode_formats = (\n+ 'AN NEE',\n+ 'ANN NEE',\n+ 'PN NEE',\n+ 'PNN NEE',\n+ 'ANC NEE',\n+ 'PND NEE',\n+ )\n+\n+ _postcode_sets = {\n+ ' ': ' ',\n+ 'N': [str(i) for i in range(0, 10)],\n+ 'A': POSTAL_ZONES_ONE_CHAR,\n+ 'B': 'ABCDEFGHKLMNOPQRSTUVWXY',\n+ 'C': 'ABCDEFGHJKSTUW',\n+ 'D': 'ABEHMNPRVWXY',\n+ 'E': 'ABDEFGHJLNPQRSTUWXYZ',\n+ 'P': POSTAL_ZONES_TWO_CHARS,\n+ }\n \n city_formats = (\n '{{city_prefix}} {{first_name}}{{city_suffix}}',\n@@ -61,6 +98,17 @@\n )\n secondary_address_formats = ('Flat #', 'Flat ##', 'Flat ##?', 'Studio #', 'Studio ##', 'Studio ##?')\n \n+ @classmethod\n+ def postcode(cls):\n+ \"\"\"\n+ See http://web.archive.org/web/20090930140939/http://www.govtalk.gov.uk/gdsc/html/noframes/PostCode-2-1-Release.htm\n+ \"\"\"\n+ postcode = ''\n+ pattern = cls.random_element(cls.postcode_formats)\n+ for placeholder in pattern:\n+ postcode += cls.random_element(cls._postcode_sets[placeholder])\n+ return postcode\n+\n @classmethod\n def city_prefix(cls):\n return cls.random_element(cls.city_prefixes)\n", "issue": "some generated UK postcodes are invalid\nUK postcodes follow strict rules and there are a limited set of codes for each part of the postcode. Faker does not know about these rules and generates postcodes such as: `XC9E 1FL` and `U93 2ZU` which are invalid. See e.g. 
https://github.com/hamstah/ukpostcodeparser for more info.\n\n", "before_files": [{"content": "from __future__ import unicode_literals \nfrom ..en import Provider as AddressProvider\n\n\nclass Provider(AddressProvider):\n city_prefixes = ('North', 'East', 'West', 'South', 'New', 'Lake', 'Port')\n city_suffixes = (\n 'town', 'ton', 'land', 'ville', 'berg', 'burgh', 'borough', 'bury', 'view', 'port', 'mouth', 'stad', 'furt',\n 'chester', 'mouth', 'fort', 'haven', 'side', 'shire')\n building_number_formats = ('#', '##', '###')\n street_suffixes = (\n 'alley', 'avenue', 'branch', 'bridge', 'brook', 'brooks', 'burg', 'burgs', 'bypass', 'camp', 'canyon', 'cape',\n 'causeway', 'center', 'centers', 'circle', 'circles', 'cliff', 'cliffs', 'club', 'common', 'corner', 'corners',\n 'course', 'court', 'courts', 'cove', 'coves', 'creek', 'crescent', 'crest', 'crossing', 'crossroad', 'curve',\n 'dale', 'dam', 'divide', 'drive', 'drive', 'drives', 'estate', 'estates', 'expressway', 'extension',\n 'extensions',\n 'fall', 'falls', 'ferry', 'field', 'fields', 'flat', 'flats', 'ford', 'fords', 'forest', 'forge', 'forges',\n 'fork',\n 'forks', 'fort', 'freeway', 'garden', 'gardens', 'gateway', 'glen', 'glens', 'green', 'greens', 'grove',\n 'groves',\n 'harbor', 'harbors', 'haven', 'heights', 'highway', 'hill', 'hills', 'hollow', 'inlet', 'inlet', 'island',\n 'island',\n 'islands', 'islands', 'isle', 'isle', 'junction', 'junctions', 'key', 'keys', 'knoll', 'knolls', 'lake',\n 'lakes',\n 'land', 'landing', 'lane', 'light', 'lights', 'loaf', 'lock', 'locks', 'locks', 'lodge', 'lodge', 'loop',\n 'mall',\n 'manor', 'manors', 'meadow', 'meadows', 'mews', 'mill', 'mills', 'mission', 'mission', 'motorway', 'mount',\n 'mountain', 'mountain', 'mountains', 'mountains', 'neck', 'orchard', 'oval', 'overpass', 'park', 'parks',\n 'parkway',\n 'parkways', 'pass', 'passage', 'path', 'pike', 'pine', 'pines', 'place', 'plain', 'plains', 'plains', 'plaza',\n 'plaza', 'point', 'points', 'port', 'port', 'ports', 'ports', 'prairie', 'prairie', 'radial', 'ramp', 'ranch',\n 'rapid', 'rapids', 'rest', 'ridge', 'ridges', 'river', 'road', 'road', 'roads', 'roads', 'route', 'row', 'rue',\n 'run', 'shoal', 'shoals', 'shore', 'shores', 'skyway', 'spring', 'springs', 'springs', 'spur', 'spurs',\n 'square',\n 'square', 'squares', 'squares', 'station', 'station', 'stravenue', 'stravenue', 'stream', 'stream', 'street',\n 'street', 'streets', 'summit', 'summit', 'terrace', 'throughway', 'trace', 'track', 'trafficway', 'trail',\n 'trail',\n 'tunnel', 'tunnel', 'turnpike', 'turnpike', 'underpass', 'union', 'unions', 'valley', 'valleys', 'via',\n 'viaduct',\n 'view', 'views', 'village', 'village', 'villages', 'ville', 'vista', 'vista', 'walk', 'walks', 'wall', 'way',\n 'ways', 'well', 'wells')\n\n postcode_formats = ('??#? #??', '?#? 
#??', '?# #??', '?## #??', '??# #??', '??## #??',)\n\n city_formats = (\n '{{city_prefix}} {{first_name}}{{city_suffix}}',\n '{{city_prefix}} {{first_name}}',\n '{{first_name}}{{city_suffix}}',\n '{{last_name}}{{city_suffix}}',\n )\n street_name_formats = (\n '{{first_name}} {{street_suffix}}',\n '{{last_name}} {{street_suffix}}'\n )\n street_address_formats = (\n '{{building_number}} {{street_name}}',\n '{{secondary_address}}\\n{{street_name}}',\n )\n address_formats = (\n \"{{street_address}}\\n{{city}}\\n{{postcode}}\",\n )\n secondary_address_formats = ('Flat #', 'Flat ##', 'Flat ##?', 'Studio #', 'Studio ##', 'Studio ##?')\n\n @classmethod\n def city_prefix(cls):\n return cls.random_element(cls.city_prefixes)\n\n @classmethod\n def secondary_address(cls):\n return cls.bothify(cls.random_element(cls.secondary_address_formats))\n", "path": "faker/providers/address/en_GB/__init__.py"}], "after_files": [{"content": "from __future__ import unicode_literals \nfrom ..en import Provider as AddressProvider\n\n\nclass Provider(AddressProvider):\n city_prefixes = ('North', 'East', 'West', 'South', 'New', 'Lake', 'Port')\n city_suffixes = (\n 'town', 'ton', 'land', 'ville', 'berg', 'burgh', 'borough', 'bury', 'view', 'port', 'mouth', 'stad', 'furt',\n 'chester', 'mouth', 'fort', 'haven', 'side', 'shire')\n building_number_formats = ('#', '##', '###')\n street_suffixes = (\n 'alley', 'avenue', 'branch', 'bridge', 'brook', 'brooks', 'burg', 'burgs', 'bypass', 'camp', 'canyon', 'cape',\n 'causeway', 'center', 'centers', 'circle', 'circles', 'cliff', 'cliffs', 'club', 'common', 'corner', 'corners',\n 'course', 'court', 'courts', 'cove', 'coves', 'creek', 'crescent', 'crest', 'crossing', 'crossroad', 'curve',\n 'dale', 'dam', 'divide', 'drive', 'drive', 'drives', 'estate', 'estates', 'expressway', 'extension',\n 'extensions',\n 'fall', 'falls', 'ferry', 'field', 'fields', 'flat', 'flats', 'ford', 'fords', 'forest', 'forge', 'forges',\n 'fork',\n 'forks', 'fort', 'freeway', 'garden', 'gardens', 'gateway', 'glen', 'glens', 'green', 'greens', 'grove',\n 'groves',\n 'harbor', 'harbors', 'haven', 'heights', 'highway', 'hill', 'hills', 'hollow', 'inlet', 'inlet', 'island',\n 'island',\n 'islands', 'islands', 'isle', 'isle', 'junction', 'junctions', 'key', 'keys', 'knoll', 'knolls', 'lake',\n 'lakes',\n 'land', 'landing', 'lane', 'light', 'lights', 'loaf', 'lock', 'locks', 'locks', 'lodge', 'lodge', 'loop',\n 'mall',\n 'manor', 'manors', 'meadow', 'meadows', 'mews', 'mill', 'mills', 'mission', 'mission', 'motorway', 'mount',\n 'mountain', 'mountain', 'mountains', 'mountains', 'neck', 'orchard', 'oval', 'overpass', 'park', 'parks',\n 'parkway',\n 'parkways', 'pass', 'passage', 'path', 'pike', 'pine', 'pines', 'place', 'plain', 'plains', 'plains', 'plaza',\n 'plaza', 'point', 'points', 'port', 'port', 'ports', 'ports', 'prairie', 'prairie', 'radial', 'ramp', 'ranch',\n 'rapid', 'rapids', 'rest', 'ridge', 'ridges', 'river', 'road', 'road', 'roads', 'roads', 'route', 'row', 'rue',\n 'run', 'shoal', 'shoals', 'shore', 'shores', 'skyway', 'spring', 'springs', 'springs', 'spur', 'spurs',\n 'square',\n 'square', 'squares', 'squares', 'station', 'station', 'stravenue', 'stravenue', 'stream', 'stream', 'street',\n 'street', 'streets', 'summit', 'summit', 'terrace', 'throughway', 'trace', 'track', 'trafficway', 'trail',\n 'trail',\n 'tunnel', 'tunnel', 'turnpike', 'turnpike', 'underpass', 'union', 'unions', 'valley', 'valleys', 'via',\n 'viaduct',\n 'view', 'views', 'village', 'village', 'villages', 'ville', 'vista', 
'vista', 'walk', 'walks', 'wall', 'way',\n 'ways', 'well', 'wells')\n\n POSTAL_ZONES = (\n 'AB', 'AL', 'B' , 'BA', 'BB', 'BD', 'BH', 'BL', 'BN', 'BR',\n 'BS', 'BT', 'CA', 'CB', 'CF', 'CH', 'CM', 'CO', 'CR', 'CT',\n 'CV', 'CW', 'DA', 'DD', 'DE', 'DG', 'DH', 'DL', 'DN', 'DT',\n 'DY', 'E' , 'EC', 'EH', 'EN', 'EX', 'FK', 'FY', 'G' , 'GL',\n 'GY', 'GU', 'HA', 'HD', 'HG', 'HP', 'HR', 'HS', 'HU', 'HX',\n 'IG', 'IM', 'IP', 'IV', 'JE', 'KA', 'KT', 'KW', 'KY', 'L' ,\n 'LA', 'LD', 'LE', 'LL', 'LN', 'LS', 'LU', 'M' , 'ME', 'MK',\n 'ML', 'N' , 'NE', 'NG', 'NN', 'NP', 'NR', 'NW', 'OL', 'OX',\n 'PA', 'PE', 'PH', 'PL', 'PO', 'PR', 'RG', 'RH', 'RM', 'S' ,\n 'SA', 'SE', 'SG', 'SK', 'SL', 'SM', 'SN', 'SO', 'SP', 'SR',\n 'SS', 'ST', 'SW', 'SY', 'TA', 'TD', 'TF', 'TN', 'TQ', 'TR',\n 'TS', 'TW', 'UB', 'W' , 'WA', 'WC', 'WD', 'WF', 'WN', 'WR',\n 'WS', 'WV', 'YO', 'ZE'\n )\n\n POSTAL_ZONES_ONE_CHAR = [zone for zone in POSTAL_ZONES if len(zone) == 1]\n POSTAL_ZONES_TWO_CHARS = [zone for zone in POSTAL_ZONES if len(zone) == 2]\n\n postcode_formats = (\n 'AN NEE',\n 'ANN NEE',\n 'PN NEE',\n 'PNN NEE',\n 'ANC NEE',\n 'PND NEE',\n )\n\n _postcode_sets = {\n ' ': ' ',\n 'N': [str(i) for i in range(0, 10)],\n 'A': POSTAL_ZONES_ONE_CHAR,\n 'B': 'ABCDEFGHKLMNOPQRSTUVWXY',\n 'C': 'ABCDEFGHJKSTUW',\n 'D': 'ABEHMNPRVWXY',\n 'E': 'ABDEFGHJLNPQRSTUWXYZ',\n 'P': POSTAL_ZONES_TWO_CHARS,\n }\n\n city_formats = (\n '{{city_prefix}} {{first_name}}{{city_suffix}}',\n '{{city_prefix}} {{first_name}}',\n '{{first_name}}{{city_suffix}}',\n '{{last_name}}{{city_suffix}}',\n )\n street_name_formats = (\n '{{first_name}} {{street_suffix}}',\n '{{last_name}} {{street_suffix}}'\n )\n street_address_formats = (\n '{{building_number}} {{street_name}}',\n '{{secondary_address}}\\n{{street_name}}',\n )\n address_formats = (\n \"{{street_address}}\\n{{city}}\\n{{postcode}}\",\n )\n secondary_address_formats = ('Flat #', 'Flat ##', 'Flat ##?', 'Studio #', 'Studio ##', 'Studio ##?')\n\n @classmethod\n def postcode(cls):\n \"\"\"\n See http://web.archive.org/web/20090930140939/http://www.govtalk.gov.uk/gdsc/html/noframes/PostCode-2-1-Release.htm\n \"\"\"\n postcode = ''\n pattern = cls.random_element(cls.postcode_formats)\n for placeholder in pattern:\n postcode += cls.random_element(cls._postcode_sets[placeholder])\n return postcode\n\n @classmethod\n def city_prefix(cls):\n return cls.random_element(cls.city_prefixes)\n\n @classmethod\n def secondary_address(cls):\n return cls.bothify(cls.random_element(cls.secondary_address_formats))\n", "path": "faker/providers/address/en_GB/__init__.py"}]}
1,691
1,001
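The en_GB address provider shown in the entry above generates postcodes by expanding a randomly chosen pattern such as `'AN NEE'` against per-placeholder character sets. A standalone sketch of that expansion step, assuming plain `random.choice` in place of Faker's `random_element` and using only a truncated, illustrative subset of the postal-zone list:

```python
import random

# Truncated stand-ins for the provider's placeholder sets:
# 'N' -> a digit, 'A' -> a one-character postal zone, 'E' -> a unit letter.
POSTAL_ZONES_ONE_CHAR = ['B', 'E', 'G', 'L', 'M', 'N', 'S', 'W']
POSTCODE_SETS = {
    ' ': ' ',
    'N': [str(i) for i in range(10)],
    'A': POSTAL_ZONES_ONE_CHAR,
    'E': 'ABDEFGHJLNPQRSTUWXYZ',
}


def postcode(pattern: str = 'AN NEE') -> str:
    # Expand every placeholder in the chosen format with one random element
    # from its set, mirroring the expansion loop in Provider.postcode().
    return ''.join(random.choice(POSTCODE_SETS[p]) for p in pattern)


print(postcode())  # e.g. 'B3 4EJ'
```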
gh_patches_debug_64108
rasdani/github-patches
git_diff
facebookresearch__hydra-2242
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Bug] Colorlog plugin generates `.log` file in cwd instead of output dir # 🐛 Bug I'm using hydra v1.2 with `chdir` set to false. When I don't use colorlog plugin, the `.log` file with python logs gets generated in my output directory (as expected). But when I attach colorlog plugin with: ```yaml defaults: - override hydra/hydra_logging: colorlog - override hydra/job_logging: colorlog ``` The `.log` file gets generated in current working directory ## Checklist - [x] I checked on the latest version of Hydra - [ ] I created a minimal repro (See [this](https://stackoverflow.com/help/minimal-reproducible-example) for tips). ## Expected Behavior I would expect the `.log` file to be always saved in output directory by default. ## System information - **Hydra Version** : 1.2 - **Python version** : 3.10 - **Virtual environment type and version** : - **Operating system** : linux --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `plugins/hydra_colorlog/hydra_plugins/hydra_colorlog/__init__.py` Content: ``` 1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 3 __version__ = "1.2.0" 4 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/plugins/hydra_colorlog/hydra_plugins/hydra_colorlog/__init__.py b/plugins/hydra_colorlog/hydra_plugins/hydra_colorlog/__init__.py --- a/plugins/hydra_colorlog/hydra_plugins/hydra_colorlog/__init__.py +++ b/plugins/hydra_colorlog/hydra_plugins/hydra_colorlog/__init__.py @@ -1,3 +1,3 @@ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -__version__ = "1.2.0" +__version__ = "1.2.1"
{"golden_diff": "diff --git a/plugins/hydra_colorlog/hydra_plugins/hydra_colorlog/__init__.py b/plugins/hydra_colorlog/hydra_plugins/hydra_colorlog/__init__.py\n--- a/plugins/hydra_colorlog/hydra_plugins/hydra_colorlog/__init__.py\n+++ b/plugins/hydra_colorlog/hydra_plugins/hydra_colorlog/__init__.py\n@@ -1,3 +1,3 @@\n # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n \n-__version__ = \"1.2.0\"\n+__version__ = \"1.2.1\"\n", "issue": "[Bug] Colorlog plugin generates `.log` file in cwd instead of output dir\n# \ud83d\udc1b Bug\r\nI'm using hydra v1.2 with `chdir` set to false.\r\n\r\nWhen I don't use colorlog plugin, the `.log` file with python logs gets generated in my output directory (as expected).\r\n\r\nBut when I attach colorlog plugin with:\r\n```yaml\r\ndefaults:\r\n - override hydra/hydra_logging: colorlog\r\n - override hydra/job_logging: colorlog\r\n```\r\nThe `.log` file gets generated in current working directory\r\n\r\n## Checklist\r\n- [x] I checked on the latest version of Hydra\r\n- [ ] I created a minimal repro (See [this](https://stackoverflow.com/help/minimal-reproducible-example) for tips).\r\n\r\n## Expected Behavior\r\nI would expect the `.log` file to be always saved in output directory by default.\r\n\r\n## System information\r\n- **Hydra Version** : 1.2\r\n- **Python version** : 3.10\r\n- **Virtual environment type and version** : \r\n- **Operating system** : linux\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n__version__ = \"1.2.0\"\n", "path": "plugins/hydra_colorlog/hydra_plugins/hydra_colorlog/__init__.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n__version__ = \"1.2.1\"\n", "path": "plugins/hydra_colorlog/hydra_plugins/hydra_colorlog/__init__.py"}]}
544
139
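The hydra entry's golden diff is only a version bump to 1.2.1; the behavioural change that moves the `.log` file back into the Hydra output directory ships with that plugin release rather than in this file. A minimal reproduction sketch of the setup the issue describes — the script name, config path and config name are illustrative assumptions, only the colorlog overrides and `chdir: false` come from the report:

```python
# conf/config.yaml (illustrative):
#   defaults:
#     - override hydra/hydra_logging: colorlog
#     - override hydra/job_logging: colorlog
#     - _self_
#   hydra:
#     job:
#       chdir: false
import logging

import hydra

log = logging.getLogger(__name__)


@hydra.main(version_base=None, config_path="conf", config_name="config")
def main(cfg) -> None:
    # With the affected plugin version this record ends up in a .log file in
    # the launch directory; with the fixed release it lands in the output dir.
    log.info("hello from the job")


if __name__ == "__main__":
    main()
```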
gh_patches_debug_11242
rasdani/github-patches
git_diff
getpelican__pelican-1002
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- "'dict_keys' object does not support indexing" in Python 3 using sourcecode directive When I include a code-block with an option, like this: ``` .. sourcecode:: python :linenos: ... ``` a WARNING appears and the corresponding file is not processed: ``` WARNING: Could not process /home/juanlu/Development/Python/pelican_test/myproject/content/2013-07-14_hello-world.rst 'dict_keys' object does not support indexing ``` The problem is here: https://github.com/getpelican/pelican/blob/master/pelican/rstdirectives.py#L35 and the solution is detailed here: http://stackoverflow.com/questions/8953627/python-dictionary-keys-error I have read the guidelines but, even being a trivial fix: ``` --- rstdirectives.py 2013-07-14 12:41:00.188687997 +0200 +++ rstdirectives.py.new 2013-07-14 12:36:25.982005000 +0200 @@ -32,7 +32,7 @@ # no lexer found - use the text one instead of an exception lexer = TextLexer() # take an arbitrary option if more than one is given - formatter = self.options and VARIANTS[self.options.keys()[0]] \ + formatter = self.options and VARIANTS[list(self.options.keys())[0]] \ or DEFAULT parsed = highlight('\n'.join(self.content), lexer, formatter) return [nodes.raw('', parsed, format='html')] ``` I don't have time to add docs, tests, run the test suite and, summing up, doing it properly. Hence the issue without pull request. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pelican/rstdirectives.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 from __future__ import unicode_literals, print_function 3 4 from docutils import nodes, utils 5 from docutils.parsers.rst import directives, roles, Directive 6 from pygments.formatters import HtmlFormatter 7 from pygments import highlight 8 from pygments.lexers import get_lexer_by_name, TextLexer 9 import re 10 11 INLINESTYLES = False 12 DEFAULT = HtmlFormatter(noclasses=INLINESTYLES) 13 VARIANTS = { 14 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True), 15 } 16 17 18 class Pygments(Directive): 19 """ Source code syntax hightlighting. 20 """ 21 required_arguments = 1 22 optional_arguments = 0 23 final_argument_whitespace = True 24 option_spec = dict([(key, directives.flag) for key in VARIANTS]) 25 has_content = True 26 27 def run(self): 28 self.assert_has_content() 29 try: 30 lexer = get_lexer_by_name(self.arguments[0]) 31 except ValueError: 32 # no lexer found - use the text one instead of an exception 33 lexer = TextLexer() 34 # take an arbitrary option if more than one is given 35 formatter = self.options and VARIANTS[self.options.keys()[0]] \ 36 or DEFAULT 37 parsed = highlight('\n'.join(self.content), lexer, formatter) 38 return [nodes.raw('', parsed, format='html')] 39 40 directives.register_directive('code-block', Pygments) 41 directives.register_directive('sourcecode', Pygments) 42 43 44 class YouTube(Directive): 45 """ Embed YouTube video in posts. 46 47 Courtesy of Brian Hsu: https://gist.github.com/1422773 48 49 VIDEO_ID is required, with / height are optional integer, 50 and align could be left / center / right. 51 52 Usage: 53 .. 
youtube:: VIDEO_ID 54 :width: 640 55 :height: 480 56 :align: center 57 """ 58 59 def align(argument): 60 """Conversion function for the "align" option.""" 61 return directives.choice(argument, ('left', 'center', 'right')) 62 63 required_arguments = 1 64 optional_arguments = 2 65 option_spec = { 66 'width': directives.positive_int, 67 'height': directives.positive_int, 68 'align': align 69 } 70 71 final_argument_whitespace = False 72 has_content = False 73 74 def run(self): 75 videoID = self.arguments[0].strip() 76 width = 420 77 height = 315 78 align = 'left' 79 80 if 'width' in self.options: 81 width = self.options['width'] 82 83 if 'height' in self.options: 84 height = self.options['height'] 85 86 if 'align' in self.options: 87 align = self.options['align'] 88 89 url = 'http://www.youtube.com/embed/%s' % videoID 90 div_block = '<div class="youtube" align="%s">' % align 91 embed_block = '<iframe width="%s" height="%s" src="%s" '\ 92 'frameborder="0"></iframe>' % (width, height, url) 93 94 return [ 95 nodes.raw('', div_block, format='html'), 96 nodes.raw('', embed_block, format='html'), 97 nodes.raw('', '</div>', format='html')] 98 99 directives.register_directive('youtube', YouTube) 100 101 _abbr_re = re.compile('\((.*)\)$') 102 103 104 class abbreviation(nodes.Inline, nodes.TextElement): 105 pass 106 107 108 def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): 109 text = utils.unescape(text) 110 m = _abbr_re.search(text) 111 if m is None: 112 return [abbreviation(text, text)], [] 113 abbr = text[:m.start()].strip() 114 expl = m.group(1) 115 return [abbreviation(abbr, abbr, explanation=expl)], [] 116 117 roles.register_local_role('abbr', abbr_role) 118 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pelican/rstdirectives.py b/pelican/rstdirectives.py --- a/pelican/rstdirectives.py +++ b/pelican/rstdirectives.py @@ -32,7 +32,7 @@ # no lexer found - use the text one instead of an exception lexer = TextLexer() # take an arbitrary option if more than one is given - formatter = self.options and VARIANTS[self.options.keys()[0]] \ + formatter = self.options and VARIANTS[list(self.options.keys())[0]] \ or DEFAULT parsed = highlight('\n'.join(self.content), lexer, formatter) return [nodes.raw('', parsed, format='html')]
{"golden_diff": "diff --git a/pelican/rstdirectives.py b/pelican/rstdirectives.py\n--- a/pelican/rstdirectives.py\n+++ b/pelican/rstdirectives.py\n@@ -32,7 +32,7 @@\n # no lexer found - use the text one instead of an exception\n lexer = TextLexer()\n # take an arbitrary option if more than one is given\n- formatter = self.options and VARIANTS[self.options.keys()[0]] \\\n+ formatter = self.options and VARIANTS[list(self.options.keys())[0]] \\\n or DEFAULT\n parsed = highlight('\\n'.join(self.content), lexer, formatter)\n return [nodes.raw('', parsed, format='html')]\n", "issue": "\"'dict_keys' object does not support indexing\" in Python 3 using sourcecode directive\nWhen I include a code-block with an option, like this:\n\n```\n.. sourcecode:: python\n :linenos:\n\n ...\n```\n\na WARNING appears and the corresponding file is not processed:\n\n```\nWARNING: Could not process /home/juanlu/Development/Python/pelican_test/myproject/content/2013-07-14_hello-world.rst\n'dict_keys' object does not support indexing\n```\n\nThe problem is here:\n\nhttps://github.com/getpelican/pelican/blob/master/pelican/rstdirectives.py#L35\n\nand the solution is detailed here:\n\nhttp://stackoverflow.com/questions/8953627/python-dictionary-keys-error\n\nI have read the guidelines but, even being a trivial fix:\n\n```\n--- rstdirectives.py 2013-07-14 12:41:00.188687997 +0200\n+++ rstdirectives.py.new 2013-07-14 12:36:25.982005000 +0200\n@@ -32,7 +32,7 @@\n # no lexer found - use the text one instead of an exception\n lexer = TextLexer()\n # take an arbitrary option if more than one is given\n- formatter = self.options and VARIANTS[self.options.keys()[0]] \\\n+ formatter = self.options and VARIANTS[list(self.options.keys())[0]] \\\n or DEFAULT\n parsed = highlight('\\n'.join(self.content), lexer, formatter)\n return [nodes.raw('', parsed, format='html')]\n```\n\nI don't have time to add docs, tests, run the test suite and, summing up, doing it properly. 
Hence the issue without pull request.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function\n\nfrom docutils import nodes, utils\nfrom docutils.parsers.rst import directives, roles, Directive\nfrom pygments.formatters import HtmlFormatter\nfrom pygments import highlight\nfrom pygments.lexers import get_lexer_by_name, TextLexer\nimport re\n\nINLINESTYLES = False\nDEFAULT = HtmlFormatter(noclasses=INLINESTYLES)\nVARIANTS = {\n 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),\n}\n\n\nclass Pygments(Directive):\n \"\"\" Source code syntax hightlighting.\n \"\"\"\n required_arguments = 1\n optional_arguments = 0\n final_argument_whitespace = True\n option_spec = dict([(key, directives.flag) for key in VARIANTS])\n has_content = True\n\n def run(self):\n self.assert_has_content()\n try:\n lexer = get_lexer_by_name(self.arguments[0])\n except ValueError:\n # no lexer found - use the text one instead of an exception\n lexer = TextLexer()\n # take an arbitrary option if more than one is given\n formatter = self.options and VARIANTS[self.options.keys()[0]] \\\n or DEFAULT\n parsed = highlight('\\n'.join(self.content), lexer, formatter)\n return [nodes.raw('', parsed, format='html')]\n\ndirectives.register_directive('code-block', Pygments)\ndirectives.register_directive('sourcecode', Pygments)\n\n\nclass YouTube(Directive):\n \"\"\" Embed YouTube video in posts.\n\n Courtesy of Brian Hsu: https://gist.github.com/1422773\n\n VIDEO_ID is required, with / height are optional integer,\n and align could be left / center / right.\n\n Usage:\n .. youtube:: VIDEO_ID\n :width: 640\n :height: 480\n :align: center\n \"\"\"\n\n def align(argument):\n \"\"\"Conversion function for the \"align\" option.\"\"\"\n return directives.choice(argument, ('left', 'center', 'right'))\n\n required_arguments = 1\n optional_arguments = 2\n option_spec = {\n 'width': directives.positive_int,\n 'height': directives.positive_int,\n 'align': align\n }\n\n final_argument_whitespace = False\n has_content = False\n\n def run(self):\n videoID = self.arguments[0].strip()\n width = 420\n height = 315\n align = 'left'\n\n if 'width' in self.options:\n width = self.options['width']\n\n if 'height' in self.options:\n height = self.options['height']\n\n if 'align' in self.options:\n align = self.options['align']\n\n url = 'http://www.youtube.com/embed/%s' % videoID\n div_block = '<div class=\"youtube\" align=\"%s\">' % align\n embed_block = '<iframe width=\"%s\" height=\"%s\" src=\"%s\" '\\\n 'frameborder=\"0\"></iframe>' % (width, height, url)\n\n return [\n nodes.raw('', div_block, format='html'),\n nodes.raw('', embed_block, format='html'),\n nodes.raw('', '</div>', format='html')]\n\ndirectives.register_directive('youtube', YouTube)\n\n_abbr_re = re.compile('\\((.*)\\)$')\n\n\nclass abbreviation(nodes.Inline, nodes.TextElement):\n pass\n\n\ndef abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):\n text = utils.unescape(text)\n m = _abbr_re.search(text)\n if m is None:\n return [abbreviation(text, text)], []\n abbr = text[:m.start()].strip()\n expl = m.group(1)\n return [abbreviation(abbr, abbr, explanation=expl)], []\n\nroles.register_local_role('abbr', abbr_role)\n", "path": "pelican/rstdirectives.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function\n\nfrom docutils import nodes, utils\nfrom docutils.parsers.rst import directives, roles, Directive\nfrom pygments.formatters 
import HtmlFormatter\nfrom pygments import highlight\nfrom pygments.lexers import get_lexer_by_name, TextLexer\nimport re\n\nINLINESTYLES = False\nDEFAULT = HtmlFormatter(noclasses=INLINESTYLES)\nVARIANTS = {\n 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),\n}\n\n\nclass Pygments(Directive):\n \"\"\" Source code syntax hightlighting.\n \"\"\"\n required_arguments = 1\n optional_arguments = 0\n final_argument_whitespace = True\n option_spec = dict([(key, directives.flag) for key in VARIANTS])\n has_content = True\n\n def run(self):\n self.assert_has_content()\n try:\n lexer = get_lexer_by_name(self.arguments[0])\n except ValueError:\n # no lexer found - use the text one instead of an exception\n lexer = TextLexer()\n # take an arbitrary option if more than one is given\n formatter = self.options and VARIANTS[list(self.options.keys())[0]] \\\n or DEFAULT\n parsed = highlight('\\n'.join(self.content), lexer, formatter)\n return [nodes.raw('', parsed, format='html')]\n\ndirectives.register_directive('code-block', Pygments)\ndirectives.register_directive('sourcecode', Pygments)\n\n\nclass YouTube(Directive):\n \"\"\" Embed YouTube video in posts.\n\n Courtesy of Brian Hsu: https://gist.github.com/1422773\n\n VIDEO_ID is required, with / height are optional integer,\n and align could be left / center / right.\n\n Usage:\n .. youtube:: VIDEO_ID\n :width: 640\n :height: 480\n :align: center\n \"\"\"\n\n def align(argument):\n \"\"\"Conversion function for the \"align\" option.\"\"\"\n return directives.choice(argument, ('left', 'center', 'right'))\n\n required_arguments = 1\n optional_arguments = 2\n option_spec = {\n 'width': directives.positive_int,\n 'height': directives.positive_int,\n 'align': align\n }\n\n final_argument_whitespace = False\n has_content = False\n\n def run(self):\n videoID = self.arguments[0].strip()\n width = 420\n height = 315\n align = 'left'\n\n if 'width' in self.options:\n width = self.options['width']\n\n if 'height' in self.options:\n height = self.options['height']\n\n if 'align' in self.options:\n align = self.options['align']\n\n url = 'http://www.youtube.com/embed/%s' % videoID\n div_block = '<div class=\"youtube\" align=\"%s\">' % align\n embed_block = '<iframe width=\"%s\" height=\"%s\" src=\"%s\" '\\\n 'frameborder=\"0\"></iframe>' % (width, height, url)\n\n return [\n nodes.raw('', div_block, format='html'),\n nodes.raw('', embed_block, format='html'),\n nodes.raw('', '</div>', format='html')]\n\ndirectives.register_directive('youtube', YouTube)\n\n_abbr_re = re.compile('\\((.*)\\)$')\n\n\nclass abbreviation(nodes.Inline, nodes.TextElement):\n pass\n\n\ndef abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):\n text = utils.unescape(text)\n m = _abbr_re.search(text)\n if m is None:\n return [abbreviation(text, text)], []\n abbr = text[:m.start()].strip()\n expl = m.group(1)\n return [abbreviation(abbr, abbr, explanation=expl)], []\n\nroles.register_local_role('abbr', abbr_role)\n", "path": "pelican/rstdirectives.py"}]}
1,775
156
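The pelican fix above is the standard Python 3 `dict_keys` indexing repair. A self-contained illustration of why the original expression breaks under Python 3 and how wrapping the view in `list()` restores the Python 2 behaviour (the option dict here is a stand-in for the parsed directive options):

```python
VARIANTS = {'linenos': 'HtmlFormatter(noclasses=False, linenos=True)'}
DEFAULT = 'HtmlFormatter(noclasses=False)'
options = {'linenos': None}  # stand-in for the parsed directive options

# Python 2: options.keys() returned a list, so options.keys()[0] was fine.
# Python 3: it is a dict_keys view, and indexing it raises the error quoted
# in the issue: "'dict_keys' object does not support indexing".
formatter = options and VARIANTS[list(options.keys())[0]] or DEFAULT
print(formatter)
```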
gh_patches_debug_40326
rasdani/github-patches
git_diff
nextcloud__appstore-201
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Delete user account A user should be able to delete his account by hitting and confirming it on the download page. The confirmation should not be able to trigger by accident, Github's delete repo ui is a good example. Before deleting his account, a user will be warned that all his comments and apps will be deleted. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nextcloudappstore/urls.py` Content: ``` 1 from allauth.account.views import signup 2 from allauth.socialaccount.views import signup as social_signup 3 from csp.decorators import csp_exempt 4 from django.conf.urls import url, include 5 from django.contrib import admin 6 from nextcloudappstore.core.user.views import PasswordView, AccountView, \ 7 APITokenView 8 from nextcloudappstore.core.views import CategoryAppListView, AppDetailView, \ 9 app_description, AppReleasesView, AppUploadView, LegalNoticeView 10 11 urlpatterns = [ 12 url(r'^$', CategoryAppListView.as_view(), {'id': None}, name='home'), 13 url(r"^signup/$", csp_exempt(signup), name="account_signup"), 14 url(r"^social/signup/$", csp_exempt(social_signup), 15 name="socialaccount_signup"), 16 url(r'^', include('allauth.urls')), 17 url(r'^account/?$', AccountView.as_view(), name='account'), 18 url(r'^account/password/?$', PasswordView.as_view(), 19 name='account-password'), 20 url(r'^account/token/?$', APITokenView.as_view(), 21 name='account-api-token'), 22 url(r'^legal/?$', LegalNoticeView.as_view(), name='legal-notice'), 23 url(r'^categories/(?P<id>[\w]*)/?$', CategoryAppListView.as_view(), 24 name='category-app-list'), 25 url(r'^app/upload/?$', AppUploadView.as_view(), name='app-upload'), 26 url(r'^app/(?P<id>[\w_]+)/?$', AppDetailView.as_view(), name='app-detail'), 27 url(r'^app/(?P<id>[\w_]+)/releases/?$', AppReleasesView.as_view(), 28 name='app-releases'), 29 url(r'^app/(?P<id>[\w_]+)/description/?$', app_description, 30 name='app-description'), 31 url(r'^api/', include('nextcloudappstore.core.api.urls', 32 namespace='api')), 33 url(r'^admin/', admin.site.urls), 34 ] 35 ``` Path: `nextcloudappstore/core/user/views.py` Content: ``` 1 from allauth.account.views import PasswordChangeView 2 from django.contrib import messages 3 from django.contrib.auth.mixins import LoginRequiredMixin 4 from django.contrib.auth.models import User 5 from django.core.urlresolvers import reverse_lazy 6 from django.views.generic import TemplateView 7 from django.views.generic import UpdateView 8 9 10 class AccountView(LoginRequiredMixin, UpdateView): 11 """Display and allow changing of the user's name.""" 12 13 template_name = 'user/account.html' 14 template_name_suffix = '' 15 model = User 16 fields = ['first_name', 'last_name'] 17 success_url = reverse_lazy('account') 18 19 def get_context_data(self, **kwargs): 20 context = super().get_context_data(**kwargs) 21 context['acc_page'] = 'account' 22 return context 23 24 def form_valid(self, form): 25 messages.success(self.request, 'Name saved.') 26 return super().form_valid(form) 27 28 def get_object(self, queryset=None): 29 return self.request.user 30 31 32 class PasswordView(LoginRequiredMixin, PasswordChangeView): 33 """Allow the user to change their password.""" 34 35 template_name = 'user/password.html' 36 success_url = reverse_lazy('account-password') 37 38 def get_context_data(self, **kwargs): 39 context = 
super().get_context_data(**kwargs) 40 context['acc_page'] = 'password' 41 return context 42 43 44 class APITokenView(LoginRequiredMixin, TemplateView): 45 """Display the user's API token, and allow it to be regenerated.""" 46 47 template_name = 'user/api-token.html' 48 49 def get_context_data(self, **kwargs): 50 context = super().get_context_data(**kwargs) 51 context['acc_page'] = 'api-token' 52 return context 53 ``` Path: `nextcloudappstore/core/user/forms.py` Content: ``` 1 from django import forms 2 from captcha.fields import ReCaptchaField 3 4 5 class SignupFormRecaptcha(forms.Form): 6 """integrate a recaptcha field.""" 7 recaptcha = ReCaptchaField() 8 9 def signup(self, request, user): 10 pass 11 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nextcloudappstore/core/user/forms.py b/nextcloudappstore/core/user/forms.py --- a/nextcloudappstore/core/user/forms.py +++ b/nextcloudappstore/core/user/forms.py @@ -1,5 +1,7 @@ from django import forms from captcha.fields import ReCaptchaField +from django.forms import EmailField +from django.utils.translation import ugettext_lazy as _ class SignupFormRecaptcha(forms.Form): @@ -8,3 +10,19 @@ def signup(self, request, user): pass + + +class DeleteAccountForm(forms.Form): + email = EmailField(required=True, label=_('Your e-mail address')) + + def __init__(self, *args, **kwargs): + self.user = kwargs.pop('user', None) + super().__init__(*args, **kwargs) + + def clean_email(self): + email = self.cleaned_data.get('email') + if self.user and self.user.email == email: + return email + else: + raise forms.ValidationError(_( + 'The given e-mail address does not match your e-mail address')) diff --git a/nextcloudappstore/core/user/views.py b/nextcloudappstore/core/user/views.py --- a/nextcloudappstore/core/user/views.py +++ b/nextcloudappstore/core/user/views.py @@ -3,9 +3,30 @@ from django.contrib.auth.mixins import LoginRequiredMixin from django.contrib.auth.models import User from django.core.urlresolvers import reverse_lazy +from django.shortcuts import redirect, render from django.views.generic import TemplateView from django.views.generic import UpdateView +from nextcloudappstore.core.user.forms import DeleteAccountForm + + +class DeleteAccountView(LoginRequiredMixin, TemplateView): + template_name = 'user/delete-account.html' + + def get_context_data(self, **kwargs): + context = super().get_context_data(**kwargs) + context['form'] = DeleteAccountForm() + context['acc_page'] = 'delete-account' + return context + + def post(self, request, *args, **kwargs): + form = DeleteAccountForm(request.POST, user=request.user) + if form.is_valid(): + request.user.delete() + return redirect(reverse_lazy('home')) + else: + return render(request, self.template_name, {'form': form}) + class AccountView(LoginRequiredMixin, UpdateView): """Display and allow changing of the user's name.""" diff --git a/nextcloudappstore/urls.py b/nextcloudappstore/urls.py --- a/nextcloudappstore/urls.py +++ b/nextcloudappstore/urls.py @@ -4,7 +4,7 @@ from django.conf.urls import url, include from django.contrib import admin from nextcloudappstore.core.user.views import PasswordView, AccountView, \ - APITokenView + APITokenView, DeleteAccountView from nextcloudappstore.core.views import CategoryAppListView, AppDetailView, \ app_description, AppReleasesView, AppUploadView, LegalNoticeView @@ -19,6 +19,8 @@ name='account-password'), url(r'^account/token/?$', APITokenView.as_view(), name='account-api-token'), + url(r'^account/delete/?$', DeleteAccountView.as_view(), + name='account-deletion'), url(r'^legal/?$', LegalNoticeView.as_view(), name='legal-notice'), url(r'^categories/(?P<id>[\w]*)/?$', CategoryAppListView.as_view(), name='category-app-list'),
{"golden_diff": "diff --git a/nextcloudappstore/core/user/forms.py b/nextcloudappstore/core/user/forms.py\n--- a/nextcloudappstore/core/user/forms.py\n+++ b/nextcloudappstore/core/user/forms.py\n@@ -1,5 +1,7 @@\n from django import forms\n from captcha.fields import ReCaptchaField\n+from django.forms import EmailField\n+from django.utils.translation import ugettext_lazy as _\n \n \n class SignupFormRecaptcha(forms.Form):\n@@ -8,3 +10,19 @@\n \n def signup(self, request, user):\n pass\n+\n+\n+class DeleteAccountForm(forms.Form):\n+ email = EmailField(required=True, label=_('Your e-mail address'))\n+\n+ def __init__(self, *args, **kwargs):\n+ self.user = kwargs.pop('user', None)\n+ super().__init__(*args, **kwargs)\n+\n+ def clean_email(self):\n+ email = self.cleaned_data.get('email')\n+ if self.user and self.user.email == email:\n+ return email\n+ else:\n+ raise forms.ValidationError(_(\n+ 'The given e-mail address does not match your e-mail address'))\ndiff --git a/nextcloudappstore/core/user/views.py b/nextcloudappstore/core/user/views.py\n--- a/nextcloudappstore/core/user/views.py\n+++ b/nextcloudappstore/core/user/views.py\n@@ -3,9 +3,30 @@\n from django.contrib.auth.mixins import LoginRequiredMixin\n from django.contrib.auth.models import User\n from django.core.urlresolvers import reverse_lazy\n+from django.shortcuts import redirect, render\n from django.views.generic import TemplateView\n from django.views.generic import UpdateView\n \n+from nextcloudappstore.core.user.forms import DeleteAccountForm\n+\n+\n+class DeleteAccountView(LoginRequiredMixin, TemplateView):\n+ template_name = 'user/delete-account.html'\n+\n+ def get_context_data(self, **kwargs):\n+ context = super().get_context_data(**kwargs)\n+ context['form'] = DeleteAccountForm()\n+ context['acc_page'] = 'delete-account'\n+ return context\n+\n+ def post(self, request, *args, **kwargs):\n+ form = DeleteAccountForm(request.POST, user=request.user)\n+ if form.is_valid():\n+ request.user.delete()\n+ return redirect(reverse_lazy('home'))\n+ else:\n+ return render(request, self.template_name, {'form': form})\n+\n \n class AccountView(LoginRequiredMixin, UpdateView):\n \"\"\"Display and allow changing of the user's name.\"\"\"\ndiff --git a/nextcloudappstore/urls.py b/nextcloudappstore/urls.py\n--- a/nextcloudappstore/urls.py\n+++ b/nextcloudappstore/urls.py\n@@ -4,7 +4,7 @@\n from django.conf.urls import url, include\n from django.contrib import admin\n from nextcloudappstore.core.user.views import PasswordView, AccountView, \\\n- APITokenView\n+ APITokenView, DeleteAccountView\n from nextcloudappstore.core.views import CategoryAppListView, AppDetailView, \\\n app_description, AppReleasesView, AppUploadView, LegalNoticeView\n \n@@ -19,6 +19,8 @@\n name='account-password'),\n url(r'^account/token/?$', APITokenView.as_view(),\n name='account-api-token'),\n+ url(r'^account/delete/?$', DeleteAccountView.as_view(),\n+ name='account-deletion'),\n url(r'^legal/?$', LegalNoticeView.as_view(), name='legal-notice'),\n url(r'^categories/(?P<id>[\\w]*)/?$', CategoryAppListView.as_view(),\n name='category-app-list'),\n", "issue": "Delete user account\nA user should be able to delete his account by hitting and confirming it on the download page. 
The confirmation should not be able to trigger by accident, Github's delete repo ui is a good example.\n\nBefore deleting his account, a user will be warned that all his comments and apps will be deleted.\n\n", "before_files": [{"content": "from allauth.account.views import signup\nfrom allauth.socialaccount.views import signup as social_signup\nfrom csp.decorators import csp_exempt\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom nextcloudappstore.core.user.views import PasswordView, AccountView, \\\n APITokenView\nfrom nextcloudappstore.core.views import CategoryAppListView, AppDetailView, \\\n app_description, AppReleasesView, AppUploadView, LegalNoticeView\n\nurlpatterns = [\n url(r'^$', CategoryAppListView.as_view(), {'id': None}, name='home'),\n url(r\"^signup/$\", csp_exempt(signup), name=\"account_signup\"),\n url(r\"^social/signup/$\", csp_exempt(social_signup),\n name=\"socialaccount_signup\"),\n url(r'^', include('allauth.urls')),\n url(r'^account/?$', AccountView.as_view(), name='account'),\n url(r'^account/password/?$', PasswordView.as_view(),\n name='account-password'),\n url(r'^account/token/?$', APITokenView.as_view(),\n name='account-api-token'),\n url(r'^legal/?$', LegalNoticeView.as_view(), name='legal-notice'),\n url(r'^categories/(?P<id>[\\w]*)/?$', CategoryAppListView.as_view(),\n name='category-app-list'),\n url(r'^app/upload/?$', AppUploadView.as_view(), name='app-upload'),\n url(r'^app/(?P<id>[\\w_]+)/?$', AppDetailView.as_view(), name='app-detail'),\n url(r'^app/(?P<id>[\\w_]+)/releases/?$', AppReleasesView.as_view(),\n name='app-releases'),\n url(r'^app/(?P<id>[\\w_]+)/description/?$', app_description,\n name='app-description'),\n url(r'^api/', include('nextcloudappstore.core.api.urls',\n namespace='api')),\n url(r'^admin/', admin.site.urls),\n]\n", "path": "nextcloudappstore/urls.py"}, {"content": "from allauth.account.views import PasswordChangeView\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.views.generic import TemplateView\nfrom django.views.generic import UpdateView\n\n\nclass AccountView(LoginRequiredMixin, UpdateView):\n \"\"\"Display and allow changing of the user's name.\"\"\"\n\n template_name = 'user/account.html'\n template_name_suffix = ''\n model = User\n fields = ['first_name', 'last_name']\n success_url = reverse_lazy('account')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'account'\n return context\n\n def form_valid(self, form):\n messages.success(self.request, 'Name saved.')\n return super().form_valid(form)\n\n def get_object(self, queryset=None):\n return self.request.user\n\n\nclass PasswordView(LoginRequiredMixin, PasswordChangeView):\n \"\"\"Allow the user to change their password.\"\"\"\n\n template_name = 'user/password.html'\n success_url = reverse_lazy('account-password')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'password'\n return context\n\n\nclass APITokenView(LoginRequiredMixin, TemplateView):\n \"\"\"Display the user's API token, and allow it to be regenerated.\"\"\"\n\n template_name = 'user/api-token.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'api-token'\n return context\n", "path": "nextcloudappstore/core/user/views.py"}, 
{"content": "from django import forms\nfrom captcha.fields import ReCaptchaField\n\n\nclass SignupFormRecaptcha(forms.Form):\n \"\"\"integrate a recaptcha field.\"\"\"\n recaptcha = ReCaptchaField()\n\n def signup(self, request, user):\n pass\n", "path": "nextcloudappstore/core/user/forms.py"}], "after_files": [{"content": "from allauth.account.views import signup\nfrom allauth.socialaccount.views import signup as social_signup\nfrom csp.decorators import csp_exempt\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom nextcloudappstore.core.user.views import PasswordView, AccountView, \\\n APITokenView, DeleteAccountView\nfrom nextcloudappstore.core.views import CategoryAppListView, AppDetailView, \\\n app_description, AppReleasesView, AppUploadView, LegalNoticeView\n\nurlpatterns = [\n url(r'^$', CategoryAppListView.as_view(), {'id': None}, name='home'),\n url(r\"^signup/$\", csp_exempt(signup), name=\"account_signup\"),\n url(r\"^social/signup/$\", csp_exempt(social_signup),\n name=\"socialaccount_signup\"),\n url(r'^', include('allauth.urls')),\n url(r'^account/?$', AccountView.as_view(), name='account'),\n url(r'^account/password/?$', PasswordView.as_view(),\n name='account-password'),\n url(r'^account/token/?$', APITokenView.as_view(),\n name='account-api-token'),\n url(r'^account/delete/?$', DeleteAccountView.as_view(),\n name='account-deletion'),\n url(r'^legal/?$', LegalNoticeView.as_view(), name='legal-notice'),\n url(r'^categories/(?P<id>[\\w]*)/?$', CategoryAppListView.as_view(),\n name='category-app-list'),\n url(r'^app/upload/?$', AppUploadView.as_view(), name='app-upload'),\n url(r'^app/(?P<id>[\\w_]+)/?$', AppDetailView.as_view(), name='app-detail'),\n url(r'^app/(?P<id>[\\w_]+)/releases/?$', AppReleasesView.as_view(),\n name='app-releases'),\n url(r'^app/(?P<id>[\\w_]+)/description/?$', app_description,\n name='app-description'),\n url(r'^api/', include('nextcloudappstore.core.api.urls',\n namespace='api')),\n url(r'^admin/', admin.site.urls),\n]\n", "path": "nextcloudappstore/urls.py"}, {"content": "from allauth.account.views import PasswordChangeView\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.shortcuts import redirect, render\nfrom django.views.generic import TemplateView\nfrom django.views.generic import UpdateView\n\nfrom nextcloudappstore.core.user.forms import DeleteAccountForm\n\n\nclass DeleteAccountView(LoginRequiredMixin, TemplateView):\n template_name = 'user/delete-account.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = DeleteAccountForm()\n context['acc_page'] = 'delete-account'\n return context\n\n def post(self, request, *args, **kwargs):\n form = DeleteAccountForm(request.POST, user=request.user)\n if form.is_valid():\n request.user.delete()\n return redirect(reverse_lazy('home'))\n else:\n return render(request, self.template_name, {'form': form})\n\n\nclass AccountView(LoginRequiredMixin, UpdateView):\n \"\"\"Display and allow changing of the user's name.\"\"\"\n\n template_name = 'user/account.html'\n template_name_suffix = ''\n model = User\n fields = ['first_name', 'last_name']\n success_url = reverse_lazy('account')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'account'\n return context\n\n def form_valid(self, form):\n 
messages.success(self.request, 'Name saved.')\n return super().form_valid(form)\n\n def get_object(self, queryset=None):\n return self.request.user\n\n\nclass PasswordView(LoginRequiredMixin, PasswordChangeView):\n \"\"\"Allow the user to change their password.\"\"\"\n\n template_name = 'user/password.html'\n success_url = reverse_lazy('account-password')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'password'\n return context\n\n\nclass APITokenView(LoginRequiredMixin, TemplateView):\n \"\"\"Display the user's API token, and allow it to be regenerated.\"\"\"\n\n template_name = 'user/api-token.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'api-token'\n return context\n", "path": "nextcloudappstore/core/user/views.py"}, {"content": "from django import forms\nfrom captcha.fields import ReCaptchaField\nfrom django.forms import EmailField\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass SignupFormRecaptcha(forms.Form):\n \"\"\"integrate a recaptcha field.\"\"\"\n recaptcha = ReCaptchaField()\n\n def signup(self, request, user):\n pass\n\n\nclass DeleteAccountForm(forms.Form):\n email = EmailField(required=True, label=_('Your e-mail address'))\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n super().__init__(*args, **kwargs)\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n if self.user and self.user.email == email:\n return email\n else:\n raise forms.ValidationError(_(\n 'The given e-mail address does not match your e-mail address'))\n", "path": "nextcloudappstore/core/user/forms.py"}]}
1,359
793
gh_patches_debug_27720
rasdani/github-patches
git_diff
scikit-hep__pyhf-383
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Use Binder Build API for Builds in PRs # Description After a discussion with @minrk and @betatim on the jupyterhub/binder Gitter, it was made clear that the use of Selenium in [`binder/trigger_binder.py`](https://github.com/diana-hep/pyhf/blob/c81f6007309f4c13241f9efac187594337d0bd08/binder/trigger_binder.py) (and the script itself) is unnecessary. Instead a simple API call can be made just using Python's `webbrowser` with an [endpoint of the form `https://mybinder.org/build/gh/owner/repo/ref`](https://gitter.im/jupyterhub/binder?at=5c2f87038dafa715c73ff54f) as can be [seen in the Binder Hub demo](https://github.com/jupyterhub/binderhub/blob/9ca8fa68bb8b69c6a2736f2275279583073f314f/examples/binder-api.py#L28) (thanks Tim for the link). So, for example ``` python -m webbrowser "https://mybinder.org/build/gh/diana-hep/pyhf/master" ``` So asking [WWKHTD](https://github.com/kelseyhightower/nocode), this means that `binder/trigger_binder.py` is unnecessary and should be removed and `.travis.yml` should be updated to use the API calls. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `binder/trigger_binder.py` Content: ``` 1 #!/usr/bin/env python 2 3 import argparse 4 from contextlib import contextmanager 5 from selenium import webdriver 6 from selenium.webdriver.chrome.options import Options 7 from selenium.webdriver.support.ui import WebDriverWait 8 from selenium.webdriver.support.expected_conditions import staleness_of 9 10 11 class SeleniumSession: 12 def __init__(self, args): 13 self.options = Options() 14 self.options.set_headless() 15 self.options.add_argument('--no-sandbox') 16 if args.chromedriver_path is not None: 17 self.browser = webdriver.Chrome( 18 args.chromedriver_path, chrome_options=self.options 19 ) 20 else: 21 self.browser = webdriver.Chrome(chrome_options=self.options) 22 23 @contextmanager 24 def wait_for_page_load(self, timeout=20): 25 old_page = self.browser.find_element_by_tag_name('html') 26 yield 27 WebDriverWait(self.browser, timeout).until(staleness_of(old_page)) 28 29 def trigger_binder(self, url): 30 with self.wait_for_page_load(): 31 self.browser.get(url) 32 33 34 def main(args): 35 driver = SeleniumSession(args) 36 if args.is_verbose: 37 print('Chrome Headless Browser Invoked') 38 driver.trigger_binder(args.url) 39 40 41 if __name__ == '__main__': 42 parser = argparse.ArgumentParser() 43 parser.add_argument( 44 '-v', 45 '--verbose', 46 dest='is_verbose', 47 action='store_true', 48 help='Print out more information', 49 ) 50 parser.add_argument( 51 '--chromedriver-path', 52 dest='chromedriver_path', 53 type=str, 54 default=None, 55 help='System path to ChromeDriver', 56 ) 57 parser.add_argument( 58 '--url', dest='url', type=str, default=None, help='URL for Selinium to open' 59 ) 60 args = parser.parse_args() 61 62 main(args) 63 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/binder/trigger_binder.py b/binder/trigger_binder.py deleted file mode 100644 --- a/binder/trigger_binder.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python - -import argparse -from contextlib import contextmanager -from selenium import webdriver -from selenium.webdriver.chrome.options import Options -from selenium.webdriver.support.ui import WebDriverWait -from selenium.webdriver.support.expected_conditions import staleness_of - - -class SeleniumSession: - def __init__(self, args): - self.options = Options() - self.options.set_headless() - self.options.add_argument('--no-sandbox') - if args.chromedriver_path is not None: - self.browser = webdriver.Chrome( - args.chromedriver_path, chrome_options=self.options - ) - else: - self.browser = webdriver.Chrome(chrome_options=self.options) - - @contextmanager - def wait_for_page_load(self, timeout=20): - old_page = self.browser.find_element_by_tag_name('html') - yield - WebDriverWait(self.browser, timeout).until(staleness_of(old_page)) - - def trigger_binder(self, url): - with self.wait_for_page_load(): - self.browser.get(url) - - -def main(args): - driver = SeleniumSession(args) - if args.is_verbose: - print('Chrome Headless Browser Invoked') - driver.trigger_binder(args.url) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument( - '-v', - '--verbose', - dest='is_verbose', - action='store_true', - help='Print out more information', - ) - parser.add_argument( - '--chromedriver-path', - dest='chromedriver_path', - type=str, - default=None, - help='System path to ChromeDriver', - ) - parser.add_argument( - '--url', dest='url', type=str, default=None, help='URL for Selinium to open' - ) - args = parser.parse_args() - - main(args)
{"golden_diff": "diff --git a/binder/trigger_binder.py b/binder/trigger_binder.py\ndeleted file mode 100644\n--- a/binder/trigger_binder.py\n+++ /dev/null\n@@ -1,62 +0,0 @@\n-#!/usr/bin/env python\n-\n-import argparse\n-from contextlib import contextmanager\n-from selenium import webdriver\n-from selenium.webdriver.chrome.options import Options\n-from selenium.webdriver.support.ui import WebDriverWait\n-from selenium.webdriver.support.expected_conditions import staleness_of\n-\n-\n-class SeleniumSession:\n- def __init__(self, args):\n- self.options = Options()\n- self.options.set_headless()\n- self.options.add_argument('--no-sandbox')\n- if args.chromedriver_path is not None:\n- self.browser = webdriver.Chrome(\n- args.chromedriver_path, chrome_options=self.options\n- )\n- else:\n- self.browser = webdriver.Chrome(chrome_options=self.options)\n-\n- @contextmanager\n- def wait_for_page_load(self, timeout=20):\n- old_page = self.browser.find_element_by_tag_name('html')\n- yield\n- WebDriverWait(self.browser, timeout).until(staleness_of(old_page))\n-\n- def trigger_binder(self, url):\n- with self.wait_for_page_load():\n- self.browser.get(url)\n-\n-\n-def main(args):\n- driver = SeleniumSession(args)\n- if args.is_verbose:\n- print('Chrome Headless Browser Invoked')\n- driver.trigger_binder(args.url)\n-\n-\n-if __name__ == '__main__':\n- parser = argparse.ArgumentParser()\n- parser.add_argument(\n- '-v',\n- '--verbose',\n- dest='is_verbose',\n- action='store_true',\n- help='Print out more information',\n- )\n- parser.add_argument(\n- '--chromedriver-path',\n- dest='chromedriver_path',\n- type=str,\n- default=None,\n- help='System path to ChromeDriver',\n- )\n- parser.add_argument(\n- '--url', dest='url', type=str, default=None, help='URL for Selinium to open'\n- )\n- args = parser.parse_args()\n-\n- main(args)\n", "issue": "Use Binder Build API for Builds in PRs\n# Description\r\n\r\nAfter a discussion with @minrk and @betatim on the jupyterhub/binder Gitter, it was made clear that the use of Selenium in [`binder/trigger_binder.py`](https://github.com/diana-hep/pyhf/blob/c81f6007309f4c13241f9efac187594337d0bd08/binder/trigger_binder.py) (and the script itself) is unnecessary. 
Instead a simple API call can be made just using Python's `webbrowser` with an [endpoint of the form `https://mybinder.org/build/gh/owner/repo/ref`](https://gitter.im/jupyterhub/binder?at=5c2f87038dafa715c73ff54f) as can be [seen in the Binder Hub demo](https://github.com/jupyterhub/binderhub/blob/9ca8fa68bb8b69c6a2736f2275279583073f314f/examples/binder-api.py#L28) (thanks Tim for the link).\r\n\r\nSo, for example\r\n\r\n```\r\npython -m webbrowser \"https://mybinder.org/build/gh/diana-hep/pyhf/master\"\r\n```\r\n\r\nSo asking [WWKHTD](https://github.com/kelseyhightower/nocode), this means that `binder/trigger_binder.py` is unnecessary and should be removed and `.travis.yml` should be updated to use the API calls.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport argparse\nfrom contextlib import contextmanager\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support.expected_conditions import staleness_of\n\n\nclass SeleniumSession:\n def __init__(self, args):\n self.options = Options()\n self.options.set_headless()\n self.options.add_argument('--no-sandbox')\n if args.chromedriver_path is not None:\n self.browser = webdriver.Chrome(\n args.chromedriver_path, chrome_options=self.options\n )\n else:\n self.browser = webdriver.Chrome(chrome_options=self.options)\n\n @contextmanager\n def wait_for_page_load(self, timeout=20):\n old_page = self.browser.find_element_by_tag_name('html')\n yield\n WebDriverWait(self.browser, timeout).until(staleness_of(old_page))\n\n def trigger_binder(self, url):\n with self.wait_for_page_load():\n self.browser.get(url)\n\n\ndef main(args):\n driver = SeleniumSession(args)\n if args.is_verbose:\n print('Chrome Headless Browser Invoked')\n driver.trigger_binder(args.url)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-v',\n '--verbose',\n dest='is_verbose',\n action='store_true',\n help='Print out more information',\n )\n parser.add_argument(\n '--chromedriver-path',\n dest='chromedriver_path',\n type=str,\n default=None,\n help='System path to ChromeDriver',\n )\n parser.add_argument(\n '--url', dest='url', type=str, default=None, help='URL for Selinium to open'\n )\n args = parser.parse_args()\n\n main(args)\n", "path": "binder/trigger_binder.py"}], "after_files": [{"content": null, "path": "binder/trigger_binder.py"}]}
1,111
476
gh_patches_debug_14777
rasdani/github-patches
git_diff
Mailu__Mailu-1941
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Letsencrypt Force Renewal Is there a limit on the Subject Alt Name entries? I have updated my /mailu/mailu.env "HOSTNAMES" variable, but when I restart Mailu it doesn't update the Subject Alt Names on the mailu cert. Previously it has worked, so I am guessing that I need to force Letsencrypt to refresh as it isnt within the renewal window. But there is no guidance for the new letsencrypt certbot. I am using the latest Mailu version (1.7) and this is the command I am using to restart mailu '/mailu/docker-compose -p mailu up -d' Letsencrypt Force Renewal Is there a limit on the Subject Alt Name entries? I have updated my /mailu/mailu.env "HOSTNAMES" variable, but when I restart Mailu it doesn't update the Subject Alt Names on the mailu cert. Previously it has worked, so I am guessing that I need to force Letsencrypt to refresh as it isnt within the renewal window. But there is no guidance for the new letsencrypt certbot. I am using the latest Mailu version (1.7) and this is the command I am using to restart mailu '/mailu/docker-compose -p mailu up -d' --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `core/nginx/letsencrypt.py` Content: ``` 1 #!/usr/bin/python3 2 3 import os 4 import time 5 import subprocess 6 7 command = [ 8 "certbot", 9 "-n", "--agree-tos", # non-interactive 10 "-d", os.environ["HOSTNAMES"], 11 "-m", "{}@{}".format(os.environ["POSTMASTER"], os.environ["DOMAIN"]), 12 "certonly", "--standalone", 13 "--cert-name", "mailu", 14 "--preferred-challenges", "http", "--http-01-port", "8008", 15 "--keep-until-expiring", 16 "--config-dir", "/certs/letsencrypt", 17 "--post-hook", "/config.py" 18 ] 19 command2 = [ 20 "certbot", 21 "-n", "--agree-tos", # non-interactive 22 "-d", os.environ["HOSTNAMES"], 23 "-m", "{}@{}".format(os.environ["POSTMASTER"], os.environ["DOMAIN"]), 24 "certonly", "--standalone", 25 "--cert-name", "mailu-ecdsa", 26 "--preferred-challenges", "http", "--http-01-port", "8008", 27 "--keep-until-expiring", 28 "--key-type", "ecdsa", 29 "--config-dir", "/certs/letsencrypt", 30 "--post-hook", "/config.py" 31 ] 32 33 def format_for_nginx(fullchain, output): 34 """ We may want to strip ISRG Root X1 out 35 """ 36 certs = [] 37 with open(fullchain, 'r') as pem: 38 cert = '' 39 for line in pem: 40 cert += line 41 if '-----END CERTIFICATE-----' in line: 42 certs += [cert] 43 cert = '' 44 with open(output, 'w') as pem: 45 for cert in certs[:-1] if len(certs)>2 and os.getenv('LETSENCRYPT_SHORTCHAIN', default="False") else certs: 46 pem.write(cert) 47 48 # Wait for nginx to start 49 time.sleep(5) 50 51 # Run certbot every day 52 while True: 53 subprocess.call(command) 54 format_for_nginx('/certs/letsencrypt/live/mailu/fullchain.pem', '/certs/letsencrypt/live/mailu/nginx-chain.pem') 55 subprocess.call(command2) 56 format_for_nginx('/certs/letsencrypt/live/mailu-ecdsa/fullchain.pem', '/certs/letsencrypt/live/mailu-ecdsa/nginx-chain.pem') 57 time.sleep(86400) 58 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/core/nginx/letsencrypt.py b/core/nginx/letsencrypt.py --- a/core/nginx/letsencrypt.py +++ b/core/nginx/letsencrypt.py @@ -13,6 +13,7 @@ "--cert-name", "mailu", "--preferred-challenges", "http", "--http-01-port", "8008", "--keep-until-expiring", + "--renew-with-new-domains", "--config-dir", "/certs/letsencrypt", "--post-hook", "/config.py" ] @@ -26,6 +27,7 @@ "--preferred-challenges", "http", "--http-01-port", "8008", "--keep-until-expiring", "--key-type", "ecdsa", + "--renew-with-new-domains", "--config-dir", "/certs/letsencrypt", "--post-hook", "/config.py" ]
{"golden_diff": "diff --git a/core/nginx/letsencrypt.py b/core/nginx/letsencrypt.py\n--- a/core/nginx/letsencrypt.py\n+++ b/core/nginx/letsencrypt.py\n@@ -13,6 +13,7 @@\n \"--cert-name\", \"mailu\",\n \"--preferred-challenges\", \"http\", \"--http-01-port\", \"8008\",\n \"--keep-until-expiring\",\n+ \"--renew-with-new-domains\",\n \"--config-dir\", \"/certs/letsencrypt\",\n \"--post-hook\", \"/config.py\"\n ]\n@@ -26,6 +27,7 @@\n \"--preferred-challenges\", \"http\", \"--http-01-port\", \"8008\",\n \"--keep-until-expiring\",\n \"--key-type\", \"ecdsa\",\n+ \"--renew-with-new-domains\",\n \"--config-dir\", \"/certs/letsencrypt\",\n \"--post-hook\", \"/config.py\"\n ]\n", "issue": "Letsencrypt Force Renewal\nIs there a limit on the Subject Alt Name entries?\r\n\r\nI have updated my /mailu/mailu.env \"HOSTNAMES\" variable, but when I restart Mailu it doesn't update the Subject Alt Names on the mailu cert.\r\n\r\nPreviously it has worked, so I am guessing that I need to force Letsencrypt to refresh as it isnt within the renewal window. But there is no guidance for the new letsencrypt certbot.\r\n\r\nI am using the latest Mailu version (1.7) and this is the command I am using to restart mailu '/mailu/docker-compose -p mailu up -d'\nLetsencrypt Force Renewal\nIs there a limit on the Subject Alt Name entries?\r\n\r\nI have updated my /mailu/mailu.env \"HOSTNAMES\" variable, but when I restart Mailu it doesn't update the Subject Alt Names on the mailu cert.\r\n\r\nPreviously it has worked, so I am guessing that I need to force Letsencrypt to refresh as it isnt within the renewal window. But there is no guidance for the new letsencrypt certbot.\r\n\r\nI am using the latest Mailu version (1.7) and this is the command I am using to restart mailu '/mailu/docker-compose -p mailu up -d'\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport time\nimport subprocess\n\ncommand = [\n \"certbot\",\n \"-n\", \"--agree-tos\", # non-interactive\n \"-d\", os.environ[\"HOSTNAMES\"],\n \"-m\", \"{}@{}\".format(os.environ[\"POSTMASTER\"], os.environ[\"DOMAIN\"]),\n \"certonly\", \"--standalone\",\n \"--cert-name\", \"mailu\",\n \"--preferred-challenges\", \"http\", \"--http-01-port\", \"8008\",\n \"--keep-until-expiring\",\n \"--config-dir\", \"/certs/letsencrypt\",\n \"--post-hook\", \"/config.py\"\n]\ncommand2 = [\n \"certbot\",\n \"-n\", \"--agree-tos\", # non-interactive\n \"-d\", os.environ[\"HOSTNAMES\"],\n \"-m\", \"{}@{}\".format(os.environ[\"POSTMASTER\"], os.environ[\"DOMAIN\"]),\n \"certonly\", \"--standalone\",\n \"--cert-name\", \"mailu-ecdsa\",\n \"--preferred-challenges\", \"http\", \"--http-01-port\", \"8008\",\n \"--keep-until-expiring\",\n \"--key-type\", \"ecdsa\",\n \"--config-dir\", \"/certs/letsencrypt\",\n \"--post-hook\", \"/config.py\"\n]\n\ndef format_for_nginx(fullchain, output):\n \"\"\" We may want to strip ISRG Root X1 out\n \"\"\"\n certs = []\n with open(fullchain, 'r') as pem:\n cert = ''\n for line in pem:\n cert += line\n if '-----END CERTIFICATE-----' in line:\n certs += [cert]\n cert = ''\n with open(output, 'w') as pem:\n for cert in certs[:-1] if len(certs)>2 and os.getenv('LETSENCRYPT_SHORTCHAIN', default=\"False\") else certs:\n pem.write(cert)\n\n# Wait for nginx to start\ntime.sleep(5)\n\n# Run certbot every day\nwhile True:\n subprocess.call(command)\n format_for_nginx('/certs/letsencrypt/live/mailu/fullchain.pem', '/certs/letsencrypt/live/mailu/nginx-chain.pem')\n subprocess.call(command2)\n 
format_for_nginx('/certs/letsencrypt/live/mailu-ecdsa/fullchain.pem', '/certs/letsencrypt/live/mailu-ecdsa/nginx-chain.pem')\n time.sleep(86400)\n", "path": "core/nginx/letsencrypt.py"}], "after_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport time\nimport subprocess\n\ncommand = [\n \"certbot\",\n \"-n\", \"--agree-tos\", # non-interactive\n \"-d\", os.environ[\"HOSTNAMES\"],\n \"-m\", \"{}@{}\".format(os.environ[\"POSTMASTER\"], os.environ[\"DOMAIN\"]),\n \"certonly\", \"--standalone\",\n \"--cert-name\", \"mailu\",\n \"--preferred-challenges\", \"http\", \"--http-01-port\", \"8008\",\n \"--keep-until-expiring\",\n \"--renew-with-new-domains\",\n \"--config-dir\", \"/certs/letsencrypt\",\n \"--post-hook\", \"/config.py\"\n]\ncommand2 = [\n \"certbot\",\n \"-n\", \"--agree-tos\", # non-interactive\n \"-d\", os.environ[\"HOSTNAMES\"],\n \"-m\", \"{}@{}\".format(os.environ[\"POSTMASTER\"], os.environ[\"DOMAIN\"]),\n \"certonly\", \"--standalone\",\n \"--cert-name\", \"mailu-ecdsa\",\n \"--preferred-challenges\", \"http\", \"--http-01-port\", \"8008\",\n \"--keep-until-expiring\",\n \"--key-type\", \"ecdsa\",\n \"--renew-with-new-domains\",\n \"--config-dir\", \"/certs/letsencrypt\",\n \"--post-hook\", \"/config.py\"\n]\n\ndef format_for_nginx(fullchain, output):\n \"\"\" We may want to strip ISRG Root X1 out\n \"\"\"\n certs = []\n with open(fullchain, 'r') as pem:\n cert = ''\n for line in pem:\n cert += line\n if '-----END CERTIFICATE-----' in line:\n certs += [cert]\n cert = ''\n with open(output, 'w') as pem:\n for cert in certs[:-1] if len(certs)>2 and os.getenv('LETSENCRYPT_SHORTCHAIN', default=\"False\") else certs:\n pem.write(cert)\n\n# Wait for nginx to start\ntime.sleep(5)\n\n# Run certbot every day\nwhile True:\n subprocess.call(command)\n format_for_nginx('/certs/letsencrypt/live/mailu/fullchain.pem', '/certs/letsencrypt/live/mailu/nginx-chain.pem')\n subprocess.call(command2)\n format_for_nginx('/certs/letsencrypt/live/mailu-ecdsa/fullchain.pem', '/certs/letsencrypt/live/mailu-ecdsa/nginx-chain.pem')\n time.sleep(86400)\n", "path": "core/nginx/letsencrypt.py"}]}
1,146
202
gh_patches_debug_13333
rasdani/github-patches
git_diff
DDMAL__CantusDB-156
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- need to have fields of chant and sequence models synced or else, it'll lead to errors, such as not being able to retrieve chants from /chant-search please fix --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `django/cantusdb_project/main_app/models/sequence.py` Content: ``` 1 from django.contrib.postgres.search import SearchVectorField 2 from django.db import models 3 from main_app.models import BaseModel 4 from users.models import User 5 6 7 class Sequence(BaseModel): 8 visible_status = models.CharField(max_length=1, blank=True, null=True) 9 title = models.CharField(blank=True, null=True, max_length=255) 10 incipit = models.CharField(blank=True, null=True, max_length=255) 11 siglum = models.CharField(blank=True, null=True, max_length=255) 12 folio = models.CharField(blank=True, null=True, max_length=255) 13 sequence = models.CharField(blank=True, null=True, max_length=255) 14 genre = models.ForeignKey("Genre", blank=True, null=True, on_delete=models.PROTECT) 15 rubrics = models.CharField(blank=True, null=True, max_length=255) 16 analecta_hymnica = models.CharField(blank=True, null=True, max_length=255) 17 indexing_notes = models.TextField(blank=True, null=True) 18 date = models.CharField(blank=True, null=True, max_length=255) 19 col1 = models.CharField(blank=True, null=True, max_length=255) 20 col2 = models.CharField(blank=True, null=True, max_length=255) 21 col3 = models.CharField(blank=True, null=True, max_length=255) 22 ah_volume = models.CharField(blank=True, null=True, max_length=255) 23 source = models.ForeignKey( 24 "Source", on_delete=models.PROTECT, blank=True, null=True 25 ) 26 cantus_id = models.CharField(blank=True, null=True, max_length=255) 27 image_link = models.URLField(blank=True, null=True) 28 json_info = models.JSONField(null=True, blank=True) 29 30 # The following fields (dummy fields) are just for harmonizing the chant and sequence models to have the same fields 31 # They should never be populated or displayed 32 # The order of the fields must be exactly the same between the seq and chant models 33 marginalia = models.CharField(max_length=63, null=True, blank=True) 34 sequence_number = models.PositiveIntegerField( 35 help_text='Each folio starts with "1"', null=True, blank=True 36 ) 37 office = models.ForeignKey( 38 "Office", on_delete=models.PROTECT, null=True, blank=True 39 ) 40 position = models.CharField(max_length=63, null=True, blank=True) 41 feast = models.ForeignKey("Feast", on_delete=models.PROTECT, null=True, blank=True) 42 mode = models.CharField(max_length=63, null=True, blank=True) 43 differentia = models.CharField(blank=True, null=True, max_length=63) 44 finalis = models.CharField(blank=True, null=True, max_length=63) 45 extra = models.CharField(blank=True, null=True, max_length=63) 46 chant_range = models.CharField( 47 blank=True, 48 null=True, 49 help_text='Example: "1-c-k-4". Optional field', 50 max_length=255, 51 ) 52 addendum = models.CharField(blank=True, null=True, max_length=255) 53 manuscript_full_text_std_spelling = models.TextField( 54 help_text="Manuscript full text with standardized spelling. Enter the words " 55 "according to the manuscript but normalize their spellings following " 56 "Classical Latin forms. 
Use upper-case letters for proper nouns, " 57 'the first word of each chant, and the first word after "Alleluia" for ' 58 "Mass Alleluias. Punctuation is omitted.", 59 null=True, 60 blank=True, 61 ) 62 manuscript_full_text_std_proofread = models.BooleanField(blank=True, null=True) 63 manuscript_full_text = models.TextField( 64 help_text="Enter the wording, word order and spellings as found in the manuscript" 65 ", with abbreviations resolved to standard words. Use upper-case letters as found" 66 " in the source. Retain “Xpistum” (Christum), “Ihc” (Jesus) and other instances of " 67 "Greek characters with their closest approximations of Latin letters. Some punctuation" 68 " signs and vertical dividing lines | are employed in this field. Repetenda and psalm " 69 "cues can also be recorded here. For more information, contact Cantus Database staff.", 70 null=True, 71 blank=True, 72 ) 73 manuscript_full_text_proofread = models.BooleanField(blank=True, null=True) 74 manuscript_syllabized_full_text = models.TextField(null=True, blank=True) 75 volpiano = models.TextField(null=True, blank=True) 76 volpiano_proofread = models.BooleanField(blank=True, null=True) 77 volpiano_notes = models.TextField(null=True, blank=True) 78 volpiano_intervals = models.TextField(null=True, blank=True) 79 # volpiano_intervals = ArrayField(base_field=models.IntegerField(), null=True, blank=True) 80 cao_concordances = models.CharField(blank=True, null=True, max_length=63) 81 proofread_by = models.ForeignKey( 82 User, on_delete=models.PROTECT, null=True, blank=True 83 ) 84 melody_id = models.CharField(blank=True, null=True, max_length=63) 85 search_vector = SearchVectorField(null=True, editable=False) 86 content_structure = models.CharField( 87 blank=True, 88 null=True, 89 max_length=64, 90 help_text="Additional folio number field, if folio numbers appear on the leaves but are not in the 'binding order'.", 91 ) 92 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/django/cantusdb_project/main_app/models/sequence.py b/django/cantusdb_project/main_app/models/sequence.py --- a/django/cantusdb_project/main_app/models/sequence.py +++ b/django/cantusdb_project/main_app/models/sequence.py @@ -41,6 +41,7 @@ feast = models.ForeignKey("Feast", on_delete=models.PROTECT, null=True, blank=True) mode = models.CharField(max_length=63, null=True, blank=True) differentia = models.CharField(blank=True, null=True, max_length=63) + differentia_id = models.CharField(blank=True, null=True, max_length=12) finalis = models.CharField(blank=True, null=True, max_length=63) extra = models.CharField(blank=True, null=True, max_length=63) chant_range = models.CharField(
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/models/sequence.py b/django/cantusdb_project/main_app/models/sequence.py\n--- a/django/cantusdb_project/main_app/models/sequence.py\n+++ b/django/cantusdb_project/main_app/models/sequence.py\n@@ -41,6 +41,7 @@\n feast = models.ForeignKey(\"Feast\", on_delete=models.PROTECT, null=True, blank=True)\n mode = models.CharField(max_length=63, null=True, blank=True)\n differentia = models.CharField(blank=True, null=True, max_length=63)\n+ differentia_id = models.CharField(blank=True, null=True, max_length=12)\n finalis = models.CharField(blank=True, null=True, max_length=63)\n extra = models.CharField(blank=True, null=True, max_length=63)\n chant_range = models.CharField(\n", "issue": "need to have fields of chant and sequence models synced \nor else, it'll lead to errors, such as not being able to retrieve chants from /chant-search\r\nplease fix\n", "before_files": [{"content": "from django.contrib.postgres.search import SearchVectorField\nfrom django.db import models\nfrom main_app.models import BaseModel\nfrom users.models import User\n\n\nclass Sequence(BaseModel):\n visible_status = models.CharField(max_length=1, blank=True, null=True)\n title = models.CharField(blank=True, null=True, max_length=255)\n incipit = models.CharField(blank=True, null=True, max_length=255)\n siglum = models.CharField(blank=True, null=True, max_length=255)\n folio = models.CharField(blank=True, null=True, max_length=255)\n sequence = models.CharField(blank=True, null=True, max_length=255)\n genre = models.ForeignKey(\"Genre\", blank=True, null=True, on_delete=models.PROTECT)\n rubrics = models.CharField(blank=True, null=True, max_length=255)\n analecta_hymnica = models.CharField(blank=True, null=True, max_length=255)\n indexing_notes = models.TextField(blank=True, null=True)\n date = models.CharField(blank=True, null=True, max_length=255)\n col1 = models.CharField(blank=True, null=True, max_length=255)\n col2 = models.CharField(blank=True, null=True, max_length=255)\n col3 = models.CharField(blank=True, null=True, max_length=255)\n ah_volume = models.CharField(blank=True, null=True, max_length=255)\n source = models.ForeignKey(\n \"Source\", on_delete=models.PROTECT, blank=True, null=True\n )\n cantus_id = models.CharField(blank=True, null=True, max_length=255)\n image_link = models.URLField(blank=True, null=True)\n json_info = models.JSONField(null=True, blank=True)\n\n # The following fields (dummy fields) are just for harmonizing the chant and sequence models to have the same fields\n # They should never be populated or displayed\n # The order of the fields must be exactly the same between the seq and chant models\n marginalia = models.CharField(max_length=63, null=True, blank=True)\n sequence_number = models.PositiveIntegerField(\n help_text='Each folio starts with \"1\"', null=True, blank=True\n )\n office = models.ForeignKey(\n \"Office\", on_delete=models.PROTECT, null=True, blank=True\n )\n position = models.CharField(max_length=63, null=True, blank=True)\n feast = models.ForeignKey(\"Feast\", on_delete=models.PROTECT, null=True, blank=True)\n mode = models.CharField(max_length=63, null=True, blank=True)\n differentia = models.CharField(blank=True, null=True, max_length=63)\n finalis = models.CharField(blank=True, null=True, max_length=63)\n extra = models.CharField(blank=True, null=True, max_length=63)\n chant_range = models.CharField(\n blank=True,\n null=True,\n help_text='Example: \"1-c-k-4\". 
Optional field',\n max_length=255,\n )\n addendum = models.CharField(blank=True, null=True, max_length=255)\n manuscript_full_text_std_spelling = models.TextField(\n help_text=\"Manuscript full text with standardized spelling. Enter the words \"\n \"according to the manuscript but normalize their spellings following \"\n \"Classical Latin forms. Use upper-case letters for proper nouns, \"\n 'the first word of each chant, and the first word after \"Alleluia\" for '\n \"Mass Alleluias. Punctuation is omitted.\",\n null=True,\n blank=True,\n )\n manuscript_full_text_std_proofread = models.BooleanField(blank=True, null=True)\n manuscript_full_text = models.TextField(\n help_text=\"Enter the wording, word order and spellings as found in the manuscript\"\n \", with abbreviations resolved to standard words. Use upper-case letters as found\"\n \" in the source. Retain \u201cXpistum\u201d (Christum), \u201cIhc\u201d (Jesus) and other instances of \"\n \"Greek characters with their closest approximations of Latin letters. Some punctuation\"\n \" signs and vertical dividing lines | are employed in this field. Repetenda and psalm \"\n \"cues can also be recorded here. For more information, contact Cantus Database staff.\",\n null=True,\n blank=True,\n )\n manuscript_full_text_proofread = models.BooleanField(blank=True, null=True)\n manuscript_syllabized_full_text = models.TextField(null=True, blank=True)\n volpiano = models.TextField(null=True, blank=True)\n volpiano_proofread = models.BooleanField(blank=True, null=True)\n volpiano_notes = models.TextField(null=True, blank=True)\n volpiano_intervals = models.TextField(null=True, blank=True)\n # volpiano_intervals = ArrayField(base_field=models.IntegerField(), null=True, blank=True)\n cao_concordances = models.CharField(blank=True, null=True, max_length=63)\n proofread_by = models.ForeignKey(\n User, on_delete=models.PROTECT, null=True, blank=True\n )\n melody_id = models.CharField(blank=True, null=True, max_length=63)\n search_vector = SearchVectorField(null=True, editable=False)\n content_structure = models.CharField(\n blank=True,\n null=True,\n max_length=64,\n help_text=\"Additional folio number field, if folio numbers appear on the leaves but are not in the 'binding order'.\",\n )\n", "path": "django/cantusdb_project/main_app/models/sequence.py"}], "after_files": [{"content": "from django.contrib.postgres.search import SearchVectorField\nfrom django.db import models\nfrom main_app.models import BaseModel\nfrom users.models import User\n\n\nclass Sequence(BaseModel):\n visible_status = models.CharField(max_length=1, blank=True, null=True)\n title = models.CharField(blank=True, null=True, max_length=255)\n incipit = models.CharField(blank=True, null=True, max_length=255)\n siglum = models.CharField(blank=True, null=True, max_length=255)\n folio = models.CharField(blank=True, null=True, max_length=255)\n sequence = models.CharField(blank=True, null=True, max_length=255)\n genre = models.ForeignKey(\"Genre\", blank=True, null=True, on_delete=models.PROTECT)\n rubrics = models.CharField(blank=True, null=True, max_length=255)\n analecta_hymnica = models.CharField(blank=True, null=True, max_length=255)\n indexing_notes = models.TextField(blank=True, null=True)\n date = models.CharField(blank=True, null=True, max_length=255)\n col1 = models.CharField(blank=True, null=True, max_length=255)\n col2 = models.CharField(blank=True, null=True, max_length=255)\n col3 = models.CharField(blank=True, null=True, max_length=255)\n ah_volume = 
models.CharField(blank=True, null=True, max_length=255)\n source = models.ForeignKey(\n \"Source\", on_delete=models.PROTECT, blank=True, null=True\n )\n cantus_id = models.CharField(blank=True, null=True, max_length=255)\n image_link = models.URLField(blank=True, null=True)\n json_info = models.JSONField(null=True, blank=True)\n\n # The following fields (dummy fields) are just for harmonizing the chant and sequence models to have the same fields\n # They should never be populated or displayed\n # The order of the fields must be exactly the same between the seq and chant models\n marginalia = models.CharField(max_length=63, null=True, blank=True)\n sequence_number = models.PositiveIntegerField(\n help_text='Each folio starts with \"1\"', null=True, blank=True\n )\n office = models.ForeignKey(\n \"Office\", on_delete=models.PROTECT, null=True, blank=True\n )\n position = models.CharField(max_length=63, null=True, blank=True)\n feast = models.ForeignKey(\"Feast\", on_delete=models.PROTECT, null=True, blank=True)\n mode = models.CharField(max_length=63, null=True, blank=True)\n differentia = models.CharField(blank=True, null=True, max_length=63)\n differentia_id = models.CharField(blank=True, null=True, max_length=12)\n finalis = models.CharField(blank=True, null=True, max_length=63)\n extra = models.CharField(blank=True, null=True, max_length=63)\n chant_range = models.CharField(\n blank=True,\n null=True,\n help_text='Example: \"1-c-k-4\". Optional field',\n max_length=255,\n )\n addendum = models.CharField(blank=True, null=True, max_length=255)\n manuscript_full_text_std_spelling = models.TextField(\n help_text=\"Manuscript full text with standardized spelling. Enter the words \"\n \"according to the manuscript but normalize their spellings following \"\n \"Classical Latin forms. Use upper-case letters for proper nouns, \"\n 'the first word of each chant, and the first word after \"Alleluia\" for '\n \"Mass Alleluias. Punctuation is omitted.\",\n null=True,\n blank=True,\n )\n manuscript_full_text_std_proofread = models.BooleanField(blank=True, null=True)\n manuscript_full_text = models.TextField(\n help_text=\"Enter the wording, word order and spellings as found in the manuscript\"\n \", with abbreviations resolved to standard words. Use upper-case letters as found\"\n \" in the source. Retain \u201cXpistum\u201d (Christum), \u201cIhc\u201d (Jesus) and other instances of \"\n \"Greek characters with their closest approximations of Latin letters. Some punctuation\"\n \" signs and vertical dividing lines | are employed in this field. Repetenda and psalm \"\n \"cues can also be recorded here. 
For more information, contact Cantus Database staff.\",\n null=True,\n blank=True,\n )\n manuscript_full_text_proofread = models.BooleanField(blank=True, null=True)\n manuscript_syllabized_full_text = models.TextField(null=True, blank=True)\n volpiano = models.TextField(null=True, blank=True)\n volpiano_proofread = models.BooleanField(blank=True, null=True)\n volpiano_notes = models.TextField(null=True, blank=True)\n volpiano_intervals = models.TextField(null=True, blank=True)\n # volpiano_intervals = ArrayField(base_field=models.IntegerField(), null=True, blank=True)\n cao_concordances = models.CharField(blank=True, null=True, max_length=63)\n proofread_by = models.ForeignKey(\n User, on_delete=models.PROTECT, null=True, blank=True\n )\n melody_id = models.CharField(blank=True, null=True, max_length=63)\n search_vector = SearchVectorField(null=True, editable=False)\n content_structure = models.CharField(\n blank=True,\n null=True,\n max_length=64,\n help_text=\"Additional folio number field, if folio numbers appear on the leaves but are not in the 'binding order'.\",\n )\n", "path": "django/cantusdb_project/main_app/models/sequence.py"}]}
1,653
194
gh_patches_debug_28234
rasdani/github-patches
git_diff
quantumlib__Cirq-3054
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support multi-qubit measurements in `cirq.plot_state_histogram` Quote from the docstring: "Currently this function assumes each measurement gate applies to only a single qubit." Currently, I get an incorrect histogram if I didn't read the docstring and used a multi-qubit measurement (I always make circuits like this): ```python qubits = cirq.LineQubit.range(3) c = cirq.Circuit( (cirq.X**0.4).on_each(*qubits), cirq.measure(*qubits), # One multi-qubit measurement ) cirq.plot_state_histogram(cirq.sample(c, repetitions=10000)) # Incorrect output, no warning or error ``` ![index2](https://user-images.githubusercontent.com/2476062/82605089-e5c42400-9ba4-11ea-8eb5-516586620a45.png) If I use single-qubit measurement gates, I get the expected histogram: ```python qubits = cirq.LineQubit.range(3) c = cirq.Circuit( (cirq.X**0.4).on_each(*qubits), cirq.measure_each(*qubits), # One measurement per qubit ) cirq.plot_state_histogram(cirq.sample(c, repetitions=10000)) ``` ![index](https://user-images.githubusercontent.com/2476062/82605108-ebba0500-9ba4-11ea-9752-f337ecab1d26.png) This looks like it could be fixed by adding some logic to `plot_state_histogram` (https://github.com/quantumlib/Cirq/blob/master/cirq/study/visualize.py#L22) that checks for multi-qubit measurements and either correctly interpret them or raise an error. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cirq/study/visualize.py` Content: ``` 1 # Copyright 2018 The Cirq Developers 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Tool to visualize the results of a study.""" 16 17 import numpy as np 18 19 from cirq.study import trial_result 20 21 22 def plot_state_histogram(result: trial_result.TrialResult) -> np.ndarray: 23 """Plot the state histogram from a single result with repetitions. 24 25 States is a bitstring representation of all the qubit states in a single 26 result. 27 Currently this function assumes each measurement gate applies to only 28 a single qubit. 29 30 Args: 31 result: The trial results to plot. 32 33 Returns: 34 The histogram. A list of values plotted on the y-axis. 35 """ 36 37 # pyplot import is deferred because it requires a system dependency 38 # (python3-tk) that `python -m pip install cirq` can't handle for the user. 39 # This allows cirq to be usable without python3-tk. 40 import matplotlib.pyplot as plt 41 42 num_qubits = len(result.measurements.keys()) 43 states = 2**num_qubits 44 values = np.zeros(states) 45 46 # measurements is a dict of {measurement gate key: 47 # array(repetitions, boolean result)} 48 # Convert this to an array of repetitions, each with an array of booleans. 49 # e.g. 
{q1: array([[True, True]]), q2: array([[False, False]])} 50 # --> array([[True, False], [True, False]]) 51 measurement_by_result = np.array([ 52 v.transpose()[0] for k, v in result.measurements.items()]).transpose() 53 54 for meas in measurement_by_result: 55 # Convert each array of booleans to a string representation. 56 # e.g. [True, False] -> [1, 0] -> '10' -> 2 57 state_ind = int(''.join([str(x) for x in [int(x) for x in meas]]), 2) 58 values[state_ind] += 1 59 60 plot_labels = [bin(x)[2:].zfill(num_qubits) for x in range(states)] 61 plt.bar(np.arange(states), values, tick_label=plot_labels) 62 plt.xlabel('qubit state') 63 plt.ylabel('result count') 64 plt.show() 65 66 return values 67 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cirq/study/visualize.py b/cirq/study/visualize.py --- a/cirq/study/visualize.py +++ b/cirq/study/visualize.py @@ -24,8 +24,6 @@ States is a bitstring representation of all the qubit states in a single result. - Currently this function assumes each measurement gate applies to only - a single qubit. Args: result: The trial results to plot. @@ -39,17 +37,15 @@ # This allows cirq to be usable without python3-tk. import matplotlib.pyplot as plt - num_qubits = len(result.measurements.keys()) + num_qubits = sum([value.shape[1] for value in result.measurements.values()]) states = 2**num_qubits values = np.zeros(states) - # measurements is a dict of {measurement gate key: # array(repetitions, boolean result)} # Convert this to an array of repetitions, each with an array of booleans. # e.g. {q1: array([[True, True]]), q2: array([[False, False]])} # --> array([[True, False], [True, False]]) - measurement_by_result = np.array([ - v.transpose()[0] for k, v in result.measurements.items()]).transpose() + measurement_by_result = np.hstack(list(result.measurements.values())) for meas in measurement_by_result: # Convert each array of booleans to a string representation.
{"golden_diff": "diff --git a/cirq/study/visualize.py b/cirq/study/visualize.py\n--- a/cirq/study/visualize.py\n+++ b/cirq/study/visualize.py\n@@ -24,8 +24,6 @@\n \n States is a bitstring representation of all the qubit states in a single\n result.\n- Currently this function assumes each measurement gate applies to only\n- a single qubit.\n \n Args:\n result: The trial results to plot.\n@@ -39,17 +37,15 @@\n # This allows cirq to be usable without python3-tk.\n import matplotlib.pyplot as plt\n \n- num_qubits = len(result.measurements.keys())\n+ num_qubits = sum([value.shape[1] for value in result.measurements.values()])\n states = 2**num_qubits\n values = np.zeros(states)\n-\n # measurements is a dict of {measurement gate key:\n # array(repetitions, boolean result)}\n # Convert this to an array of repetitions, each with an array of booleans.\n # e.g. {q1: array([[True, True]]), q2: array([[False, False]])}\n # --> array([[True, False], [True, False]])\n- measurement_by_result = np.array([\n- v.transpose()[0] for k, v in result.measurements.items()]).transpose()\n+ measurement_by_result = np.hstack(list(result.measurements.values()))\n \n for meas in measurement_by_result:\n # Convert each array of booleans to a string representation.\n", "issue": "Support multi-qubit measurements in `cirq.plot_state_histogram`\nQuote from the docstring: \"Currently this function assumes each measurement gate applies to only a single qubit.\"\r\n\r\nCurrently, I get an incorrect histogram if I didn't read the docstring and used a multi-qubit measurement (I always make circuits like this):\r\n```python\r\nqubits = cirq.LineQubit.range(3)\r\nc = cirq.Circuit(\r\n (cirq.X**0.4).on_each(*qubits),\r\n cirq.measure(*qubits), # One multi-qubit measurement\r\n)\r\ncirq.plot_state_histogram(cirq.sample(c, repetitions=10000))\r\n# Incorrect output, no warning or error\r\n```\r\n![index2](https://user-images.githubusercontent.com/2476062/82605089-e5c42400-9ba4-11ea-8eb5-516586620a45.png)\r\n\r\nIf I use single-qubit measurement gates, I get the expected histogram:\r\n```python\r\nqubits = cirq.LineQubit.range(3)\r\nc = cirq.Circuit(\r\n (cirq.X**0.4).on_each(*qubits),\r\n cirq.measure_each(*qubits), # One measurement per qubit\r\n)\r\ncirq.plot_state_histogram(cirq.sample(c, repetitions=10000))\r\n```\r\n![index](https://user-images.githubusercontent.com/2476062/82605108-ebba0500-9ba4-11ea-9752-f337ecab1d26.png)\r\n\r\nThis looks like it could be fixed by adding some logic to `plot_state_histogram` (https://github.com/quantumlib/Cirq/blob/master/cirq/study/visualize.py#L22) that checks for multi-qubit measurements and either correctly interpret them or raise an error.\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tool to visualize the results of a study.\"\"\"\n\nimport numpy as np\n\nfrom cirq.study import trial_result\n\n\ndef plot_state_histogram(result: trial_result.TrialResult) -> np.ndarray:\n \"\"\"Plot the state histogram from 
a single result with repetitions.\n\n States is a bitstring representation of all the qubit states in a single\n result.\n Currently this function assumes each measurement gate applies to only\n a single qubit.\n\n Args:\n result: The trial results to plot.\n\n Returns:\n The histogram. A list of values plotted on the y-axis.\n \"\"\"\n\n # pyplot import is deferred because it requires a system dependency\n # (python3-tk) that `python -m pip install cirq` can't handle for the user.\n # This allows cirq to be usable without python3-tk.\n import matplotlib.pyplot as plt\n\n num_qubits = len(result.measurements.keys())\n states = 2**num_qubits\n values = np.zeros(states)\n\n # measurements is a dict of {measurement gate key:\n # array(repetitions, boolean result)}\n # Convert this to an array of repetitions, each with an array of booleans.\n # e.g. {q1: array([[True, True]]), q2: array([[False, False]])}\n # --> array([[True, False], [True, False]])\n measurement_by_result = np.array([\n v.transpose()[0] for k, v in result.measurements.items()]).transpose()\n\n for meas in measurement_by_result:\n # Convert each array of booleans to a string representation.\n # e.g. [True, False] -> [1, 0] -> '10' -> 2\n state_ind = int(''.join([str(x) for x in [int(x) for x in meas]]), 2)\n values[state_ind] += 1\n\n plot_labels = [bin(x)[2:].zfill(num_qubits) for x in range(states)]\n plt.bar(np.arange(states), values, tick_label=plot_labels)\n plt.xlabel('qubit state')\n plt.ylabel('result count')\n plt.show()\n\n return values\n", "path": "cirq/study/visualize.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tool to visualize the results of a study.\"\"\"\n\nimport numpy as np\n\nfrom cirq.study import trial_result\n\n\ndef plot_state_histogram(result: trial_result.TrialResult) -> np.ndarray:\n \"\"\"Plot the state histogram from a single result with repetitions.\n\n States is a bitstring representation of all the qubit states in a single\n result.\n\n Args:\n result: The trial results to plot.\n\n Returns:\n The histogram. A list of values plotted on the y-axis.\n \"\"\"\n\n # pyplot import is deferred because it requires a system dependency\n # (python3-tk) that `python -m pip install cirq` can't handle for the user.\n # This allows cirq to be usable without python3-tk.\n import matplotlib.pyplot as plt\n\n num_qubits = sum([value.shape[1] for value in result.measurements.values()])\n states = 2**num_qubits\n values = np.zeros(states)\n # measurements is a dict of {measurement gate key:\n # array(repetitions, boolean result)}\n # Convert this to an array of repetitions, each with an array of booleans.\n # e.g. {q1: array([[True, True]]), q2: array([[False, False]])}\n # --> array([[True, False], [True, False]])\n measurement_by_result = np.hstack(list(result.measurements.values()))\n\n for meas in measurement_by_result:\n # Convert each array of booleans to a string representation.\n # e.g. 
[True, False] -> [1, 0] -> '10' -> 2\n state_ind = int(''.join([str(x) for x in [int(x) for x in meas]]), 2)\n values[state_ind] += 1\n\n plot_labels = [bin(x)[2:].zfill(num_qubits) for x in range(states)]\n plt.bar(np.arange(states), values, tick_label=plot_labels)\n plt.xlabel('qubit state')\n plt.ylabel('result count')\n plt.show()\n\n return values\n", "path": "cirq/study/visualize.py"}]}
1,416
344
gh_patches_debug_26493
rasdani/github-patches
git_diff
Pylons__pyramid-2567
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- exception views squash the original exception if an exception view predicate is tested and fails If you register an exception view with a predicate such as `config.add_view(view, context=Exception, request_method='GET')`, this will cause any errors from a POST request (without a matching exception view) to be squashed into a `PredicateMismatch` exception which will be propagated up the wsgi stack instead of the original exception. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pyramid/tweens.py` Content: ``` 1 import sys 2 3 from pyramid.interfaces import ( 4 IExceptionViewClassifier, 5 IRequest, 6 ) 7 8 from zope.interface import providedBy 9 from pyramid.view import _call_view 10 11 def excview_tween_factory(handler, registry): 12 """ A :term:`tween` factory which produces a tween that catches an 13 exception raised by downstream tweens (or the main Pyramid request 14 handler) and, if possible, converts it into a Response using an 15 :term:`exception view`.""" 16 17 def excview_tween(request): 18 attrs = request.__dict__ 19 try: 20 response = handler(request) 21 except Exception as exc: 22 # WARNING: do not assign the result of sys.exc_info() to a local 23 # var here, doing so will cause a leak. We used to actually 24 # explicitly delete both "exception" and "exc_info" from ``attrs`` 25 # in a ``finally:`` clause below, but now we do not because these 26 # attributes are useful to upstream tweens. This actually still 27 # apparently causes a reference cycle, but it is broken 28 # successfully by the garbage collector (see 29 # https://github.com/Pylons/pyramid/issues/1223). 30 attrs['exc_info'] = sys.exc_info() 31 attrs['exception'] = exc 32 # clear old generated request.response, if any; it may 33 # have been mutated by the view, and its state is not 34 # sane (e.g. caching headers) 35 if 'response' in attrs: 36 del attrs['response'] 37 # we use .get instead of .__getitem__ below due to 38 # https://github.com/Pylons/pyramid/issues/700 39 request_iface = attrs.get('request_iface', IRequest) 40 provides = providedBy(exc) 41 response = _call_view( 42 registry, 43 request, 44 exc, 45 provides, 46 '', 47 view_classifier=IExceptionViewClassifier, 48 request_iface=request_iface.combined 49 ) 50 if response is None: 51 raise 52 53 return response 54 55 return excview_tween 56 57 MAIN = 'MAIN' 58 INGRESS = 'INGRESS' 59 EXCVIEW = 'pyramid.tweens.excview_tween_factory' 60 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pyramid/tweens.py b/pyramid/tweens.py --- a/pyramid/tweens.py +++ b/pyramid/tweens.py @@ -1,5 +1,7 @@ import sys +from pyramid.compat import reraise +from pyramid.exceptions import PredicateMismatch from pyramid.interfaces import ( IExceptionViewClassifier, IRequest, @@ -38,17 +40,26 @@ # https://github.com/Pylons/pyramid/issues/700 request_iface = attrs.get('request_iface', IRequest) provides = providedBy(exc) - response = _call_view( - registry, - request, - exc, - provides, - '', - view_classifier=IExceptionViewClassifier, - request_iface=request_iface.combined - ) + try: + response = _call_view( + registry, + request, + exc, + provides, + '', + view_classifier=IExceptionViewClassifier, + request_iface=request_iface.combined + ) + + # if views matched but did not pass predicates, squash the error + # and re-raise the original exception + except PredicateMismatch: + response = None + + # re-raise the original exception as no exception views were + # able to handle the error if response is None: - raise + reraise(*attrs['exc_info']) return response
{"golden_diff": "diff --git a/pyramid/tweens.py b/pyramid/tweens.py\n--- a/pyramid/tweens.py\n+++ b/pyramid/tweens.py\n@@ -1,5 +1,7 @@\n import sys\n \n+from pyramid.compat import reraise\n+from pyramid.exceptions import PredicateMismatch\n from pyramid.interfaces import (\n IExceptionViewClassifier,\n IRequest,\n@@ -38,17 +40,26 @@\n # https://github.com/Pylons/pyramid/issues/700\n request_iface = attrs.get('request_iface', IRequest)\n provides = providedBy(exc)\n- response = _call_view(\n- registry,\n- request,\n- exc,\n- provides,\n- '',\n- view_classifier=IExceptionViewClassifier,\n- request_iface=request_iface.combined\n- )\n+ try:\n+ response = _call_view(\n+ registry,\n+ request,\n+ exc,\n+ provides,\n+ '',\n+ view_classifier=IExceptionViewClassifier,\n+ request_iface=request_iface.combined\n+ )\n+\n+ # if views matched but did not pass predicates, squash the error\n+ # and re-raise the original exception\n+ except PredicateMismatch:\n+ response = None\n+\n+ # re-raise the original exception as no exception views were\n+ # able to handle the error\n if response is None:\n- raise\n+ reraise(*attrs['exc_info'])\n \n return response\n", "issue": "exception views squash the original exception if an exception view predicate is tested and fails\nIf you register an exception view with a predicate such as `config.add_view(view, context=Exception, request_method='GET')`, this will cause any errors from a POST request (without a matching exception view) to be squashed into a `PredicateMismatch` exception which will be propagated up the wsgi stack instead of the original exception.\n\n", "before_files": [{"content": "import sys\n\nfrom pyramid.interfaces import (\n IExceptionViewClassifier,\n IRequest,\n )\n\nfrom zope.interface import providedBy\nfrom pyramid.view import _call_view\n\ndef excview_tween_factory(handler, registry):\n \"\"\" A :term:`tween` factory which produces a tween that catches an\n exception raised by downstream tweens (or the main Pyramid request\n handler) and, if possible, converts it into a Response using an\n :term:`exception view`.\"\"\"\n\n def excview_tween(request):\n attrs = request.__dict__\n try:\n response = handler(request)\n except Exception as exc:\n # WARNING: do not assign the result of sys.exc_info() to a local\n # var here, doing so will cause a leak. We used to actually\n # explicitly delete both \"exception\" and \"exc_info\" from ``attrs``\n # in a ``finally:`` clause below, but now we do not because these\n # attributes are useful to upstream tweens. This actually still\n # apparently causes a reference cycle, but it is broken\n # successfully by the garbage collector (see\n # https://github.com/Pylons/pyramid/issues/1223).\n attrs['exc_info'] = sys.exc_info()\n attrs['exception'] = exc\n # clear old generated request.response, if any; it may\n # have been mutated by the view, and its state is not\n # sane (e.g. 
caching headers)\n if 'response' in attrs:\n del attrs['response']\n # we use .get instead of .__getitem__ below due to\n # https://github.com/Pylons/pyramid/issues/700\n request_iface = attrs.get('request_iface', IRequest)\n provides = providedBy(exc)\n response = _call_view(\n registry,\n request,\n exc,\n provides,\n '',\n view_classifier=IExceptionViewClassifier,\n request_iface=request_iface.combined\n )\n if response is None:\n raise\n\n return response\n\n return excview_tween\n\nMAIN = 'MAIN'\nINGRESS = 'INGRESS'\nEXCVIEW = 'pyramid.tweens.excview_tween_factory'\n", "path": "pyramid/tweens.py"}], "after_files": [{"content": "import sys\n\nfrom pyramid.compat import reraise\nfrom pyramid.exceptions import PredicateMismatch\nfrom pyramid.interfaces import (\n IExceptionViewClassifier,\n IRequest,\n )\n\nfrom zope.interface import providedBy\nfrom pyramid.view import _call_view\n\ndef excview_tween_factory(handler, registry):\n \"\"\" A :term:`tween` factory which produces a tween that catches an\n exception raised by downstream tweens (or the main Pyramid request\n handler) and, if possible, converts it into a Response using an\n :term:`exception view`.\"\"\"\n\n def excview_tween(request):\n attrs = request.__dict__\n try:\n response = handler(request)\n except Exception as exc:\n # WARNING: do not assign the result of sys.exc_info() to a local\n # var here, doing so will cause a leak. We used to actually\n # explicitly delete both \"exception\" and \"exc_info\" from ``attrs``\n # in a ``finally:`` clause below, but now we do not because these\n # attributes are useful to upstream tweens. This actually still\n # apparently causes a reference cycle, but it is broken\n # successfully by the garbage collector (see\n # https://github.com/Pylons/pyramid/issues/1223).\n attrs['exc_info'] = sys.exc_info()\n attrs['exception'] = exc\n # clear old generated request.response, if any; it may\n # have been mutated by the view, and its state is not\n # sane (e.g. caching headers)\n if 'response' in attrs:\n del attrs['response']\n # we use .get instead of .__getitem__ below due to\n # https://github.com/Pylons/pyramid/issues/700\n request_iface = attrs.get('request_iface', IRequest)\n provides = providedBy(exc)\n try:\n response = _call_view(\n registry,\n request,\n exc,\n provides,\n '',\n view_classifier=IExceptionViewClassifier,\n request_iface=request_iface.combined\n )\n\n # if views matched but did not pass predicates, squash the error\n # and re-raise the original exception\n except PredicateMismatch:\n response = None\n\n # re-raise the original exception as no exception views were\n # able to handle the error\n if response is None:\n reraise(*attrs['exc_info'])\n\n return response\n\n return excview_tween\n\nMAIN = 'MAIN'\nINGRESS = 'INGRESS'\nEXCVIEW = 'pyramid.tweens.excview_tween_factory'\n", "path": "pyramid/tweens.py"}]}
935
325
gh_patches_debug_8056
rasdani/github-patches
git_diff
googleapis__python-bigquery-80
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- chore: replace Artman with bazel for synthesizing code The synthtool should start using bazel instead of Artman. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `synth.py` Content: ``` 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """This script is used to synthesize generated parts of this library.""" 16 17 import synthtool as s 18 from synthtool import gcp 19 20 gapic = gcp.GAPICGenerator() 21 common = gcp.CommonTemplates() 22 version = 'v2' 23 24 library = gapic.py_library( 25 'bigquery', 26 version, 27 config_path='/google/cloud/bigquery/' 28 'artman_bigquery_v2.yaml', 29 artman_output_name='bigquery-v2', 30 include_protos=True, 31 ) 32 33 s.move( 34 [ 35 library / "google/cloud/bigquery_v2/gapic/enums.py", 36 library / "google/cloud/bigquery_v2/types.py", 37 library / "google/cloud/bigquery_v2/proto/location*", 38 library / "google/cloud/bigquery_v2/proto/encryption_config*", 39 library / "google/cloud/bigquery_v2/proto/model*", 40 library / "google/cloud/bigquery_v2/proto/standard_sql*", 41 ], 42 ) 43 44 # Fix up proto docs that are missing summary line. 45 s.replace( 46 "google/cloud/bigquery_v2/proto/model_pb2.py", 47 '"""Attributes:', 48 '"""Protocol buffer.\n\n Attributes:', 49 ) 50 s.replace( 51 "google/cloud/bigquery_v2/proto/encryption_config_pb2.py", 52 '"""Attributes:', 53 '"""Encryption configuration.\n\n Attributes:', 54 ) 55 56 # Remove non-ascii characters from docstrings for Python 2.7. 57 # Format quoted strings as plain text. 58 s.replace("google/cloud/bigquery_v2/proto/*.py", "[“”]", '``') 59 60 # ---------------------------------------------------------------------------- 61 # Add templated files 62 # ---------------------------------------------------------------------------- 63 templated_files = common.py_library(cov_level=100) 64 s.move(templated_files, excludes=["noxfile.py"]) 65 66 s.shell.run(["nox", "-s", "blacken"], hide_output=False) 67 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/synth.py b/synth.py --- a/synth.py +++ b/synth.py @@ -17,16 +17,14 @@ import synthtool as s from synthtool import gcp -gapic = gcp.GAPICGenerator() +gapic = gcp.GAPICBazel() common = gcp.CommonTemplates() version = 'v2' library = gapic.py_library( - 'bigquery', - version, - config_path='/google/cloud/bigquery/' - 'artman_bigquery_v2.yaml', - artman_output_name='bigquery-v2', + service='bigquery', + version=version, + bazel_target=f"//google/cloud/bigquery/{version}:bigquery-{version}-py", include_protos=True, )
{"golden_diff": "diff --git a/synth.py b/synth.py\n--- a/synth.py\n+++ b/synth.py\n@@ -17,16 +17,14 @@\n import synthtool as s\n from synthtool import gcp\n \n-gapic = gcp.GAPICGenerator()\n+gapic = gcp.GAPICBazel()\n common = gcp.CommonTemplates()\n version = 'v2'\n \n library = gapic.py_library(\n- 'bigquery',\n- version,\n- config_path='/google/cloud/bigquery/'\n- 'artman_bigquery_v2.yaml',\n- artman_output_name='bigquery-v2',\n+ service='bigquery',\n+ version=version,\n+ bazel_target=f\"//google/cloud/bigquery/{version}:bigquery-{version}-py\",\n include_protos=True,\n )\n", "issue": "chore: replace Artman with bazel for synthesizing code\nThe synthtool should start using bazel instead of Artman.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script is used to synthesize generated parts of this library.\"\"\"\n\nimport synthtool as s\nfrom synthtool import gcp\n\ngapic = gcp.GAPICGenerator()\ncommon = gcp.CommonTemplates()\nversion = 'v2'\n\nlibrary = gapic.py_library(\n 'bigquery',\n version,\n config_path='/google/cloud/bigquery/'\n 'artman_bigquery_v2.yaml',\n artman_output_name='bigquery-v2',\n include_protos=True,\n)\n\ns.move(\n [\n library / \"google/cloud/bigquery_v2/gapic/enums.py\",\n library / \"google/cloud/bigquery_v2/types.py\",\n library / \"google/cloud/bigquery_v2/proto/location*\",\n library / \"google/cloud/bigquery_v2/proto/encryption_config*\",\n library / \"google/cloud/bigquery_v2/proto/model*\",\n library / \"google/cloud/bigquery_v2/proto/standard_sql*\",\n ],\n)\n\n# Fix up proto docs that are missing summary line.\ns.replace(\n \"google/cloud/bigquery_v2/proto/model_pb2.py\",\n '\"\"\"Attributes:',\n '\"\"\"Protocol buffer.\\n\\n Attributes:',\n)\ns.replace(\n \"google/cloud/bigquery_v2/proto/encryption_config_pb2.py\",\n '\"\"\"Attributes:',\n '\"\"\"Encryption configuration.\\n\\n Attributes:',\n)\n\n# Remove non-ascii characters from docstrings for Python 2.7.\n# Format quoted strings as plain text.\ns.replace(\"google/cloud/bigquery_v2/proto/*.py\", \"[\u201c\u201d]\", '``')\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library(cov_level=100)\ns.move(templated_files, excludes=[\"noxfile.py\"])\n\ns.shell.run([\"nox\", \"-s\", \"blacken\"], hide_output=False)\n", "path": "synth.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the 
specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script is used to synthesize generated parts of this library.\"\"\"\n\nimport synthtool as s\nfrom synthtool import gcp\n\ngapic = gcp.GAPICBazel()\ncommon = gcp.CommonTemplates()\nversion = 'v2'\n\nlibrary = gapic.py_library(\n service='bigquery',\n version=version,\n bazel_target=f\"//google/cloud/bigquery/{version}:bigquery-{version}-py\",\n include_protos=True,\n)\n\ns.move(\n [\n library / \"google/cloud/bigquery_v2/gapic/enums.py\",\n library / \"google/cloud/bigquery_v2/types.py\",\n library / \"google/cloud/bigquery_v2/proto/location*\",\n library / \"google/cloud/bigquery_v2/proto/encryption_config*\",\n library / \"google/cloud/bigquery_v2/proto/model*\",\n library / \"google/cloud/bigquery_v2/proto/standard_sql*\",\n ],\n)\n\n# Fix up proto docs that are missing summary line.\ns.replace(\n \"google/cloud/bigquery_v2/proto/model_pb2.py\",\n '\"\"\"Attributes:',\n '\"\"\"Protocol buffer.\\n\\n Attributes:',\n)\ns.replace(\n \"google/cloud/bigquery_v2/proto/encryption_config_pb2.py\",\n '\"\"\"Attributes:',\n '\"\"\"Encryption configuration.\\n\\n Attributes:',\n)\n\n# Remove non-ascii characters from docstrings for Python 2.7.\n# Format quoted strings as plain text.\ns.replace(\"google/cloud/bigquery_v2/proto/*.py\", \"[\u201c\u201d]\", '``')\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library(cov_level=100)\ns.move(templated_files, excludes=[\"noxfile.py\"])\n\ns.shell.run([\"nox\", \"-s\", \"blacken\"], hide_output=False)\n", "path": "synth.py"}]}
928
179
gh_patches_debug_3011
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-10572
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Most recent available `mambaforge=4.10` is simply too old Hello guys, just wanted to ask you if it's possible to have a more modern version available for `mambaforge` - the best and latest available to be sourced on RTD via the configuration file is 4.10 which is simply too old (maximum conda 4.10 and mamba 0.19) - updating to a modern mamba doesn't work, as you can see from me changing the conf file in https://github.com/ESMValGroup/ESMValTool/pull/3310/files with output in https://readthedocs.org/projects/esmvaltool/builds/21390633/ - mamba is stuck at 0.19.0, which, in turn, slows down the environment creation process to around 10 minutes (for more recent conda's, updating mamba to something like >=1.4.8 works very well, and updates conda to 23.3 or 23.4 too, but in this case the base versions are too old). If you need any help whatsoever, I offer to help, and once more, many thanks for your great work on RTD :beer: --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `readthedocs/builds/constants_docker.py` Content: ``` 1 """ 2 Define constants here to allow import them without any external dependency. 3 4 There are situations where we want to have access to these values without Django installed 5 (e.g. common/dockerfiles/tasks.py) 6 7 Note these constants where previously defined as Django settings in ``readthedocs/settings/base.py``. 8 """ 9 10 DOCKER_DEFAULT_IMAGE = "readthedocs/build" 11 12 # Adding a new tool/version to this setting requires: 13 # 14 # - a mapping between the expected version in the config file, to the full 15 # version installed via asdf (found via ``asdf list all <tool>``) 16 # 17 # - running the script ``./scripts/compile_version_upload.sh`` in 18 # development and production environments to compile and cache the new 19 # tool/version 20 # 21 # Note that when updating this options, you should also update the file: 22 # readthedocs/rtd_tests/fixtures/spec/v2/schema.json 23 RTD_DOCKER_BUILD_SETTINGS = { 24 # Mapping of build.os options to docker image. 25 "os": { 26 "ubuntu-20.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04", 27 "ubuntu-22.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04", 28 }, 29 # Mapping of build.tools options to specific versions. 30 "tools": { 31 "python": { 32 "2.7": "2.7.18", 33 "3.6": "3.6.15", 34 "3.7": "3.7.17", 35 "3.8": "3.8.17", 36 "3.9": "3.9.17", 37 "3.10": "3.10.12", 38 "3.11": "3.11.4", 39 # Always point to the latest stable release. 40 "3": "3.11.4", 41 "miniconda3-4.7": "miniconda3-4.7.12", 42 "mambaforge-4.10": "mambaforge-4.10.3-10", 43 }, 44 "nodejs": { 45 "14": "14.20.1", 46 "16": "16.18.1", 47 "18": "18.16.1", # LTS 48 "19": "19.0.1", 49 "20": "20.3.1", 50 }, 51 "rust": { 52 "1.55": "1.55.0", 53 "1.61": "1.61.0", 54 "1.64": "1.64.0", 55 "1.70": "1.70.0", 56 }, 57 "golang": { 58 "1.17": "1.17.13", 59 "1.18": "1.18.10", 60 "1.19": "1.19.10", 61 "1.20": "1.20.5", 62 }, 63 }, 64 } 65 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py --- a/readthedocs/builds/constants_docker.py +++ b/readthedocs/builds/constants_docker.py @@ -40,6 +40,7 @@ "3": "3.11.4", "miniconda3-4.7": "miniconda3-4.7.12", "mambaforge-4.10": "mambaforge-4.10.3-10", + "mambaforge-22.9": "mambaforge-22.9.0-3", }, "nodejs": { "14": "14.20.1",
{"golden_diff": "diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py\n--- a/readthedocs/builds/constants_docker.py\n+++ b/readthedocs/builds/constants_docker.py\n@@ -40,6 +40,7 @@\n \"3\": \"3.11.4\",\n \"miniconda3-4.7\": \"miniconda3-4.7.12\",\n \"mambaforge-4.10\": \"mambaforge-4.10.3-10\",\n+ \"mambaforge-22.9\": \"mambaforge-22.9.0-3\",\n },\n \"nodejs\": {\n \"14\": \"14.20.1\",\n", "issue": "Most recent available `mambaforge=4.10` is simply too old\nHello guys, just wanted to ask you if it's possible to have a more modern version available for `mambaforge` - the best and latest available to be sourced on RTD via the configuration file is 4.10 which is simply too old (maximum conda 4.10 and mamba 0.19) - updating to a modern mamba doesn't work, as you can see from me changing the conf file in https://github.com/ESMValGroup/ESMValTool/pull/3310/files with output in https://readthedocs.org/projects/esmvaltool/builds/21390633/ - mamba is stuck at 0.19.0, which, in turn, slows down the environment creation process to around 10 minutes (for more recent conda's, updating mamba to something like >=1.4.8 works very well, and updates conda to 23.3 or 23.4 too, but in this case the base versions are too old). If you need any help whatsoever, I offer to help, and once more, many thanks for your great work on RTD :beer: \n", "before_files": [{"content": "\"\"\"\nDefine constants here to allow import them without any external dependency.\n\nThere are situations where we want to have access to these values without Django installed\n(e.g. common/dockerfiles/tasks.py)\n\nNote these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.\n\"\"\"\n\nDOCKER_DEFAULT_IMAGE = \"readthedocs/build\"\n\n# Adding a new tool/version to this setting requires:\n#\n# - a mapping between the expected version in the config file, to the full\n# version installed via asdf (found via ``asdf list all <tool>``)\n#\n# - running the script ``./scripts/compile_version_upload.sh`` in\n# development and production environments to compile and cache the new\n# tool/version\n#\n# Note that when updating this options, you should also update the file:\n# readthedocs/rtd_tests/fixtures/spec/v2/schema.json\nRTD_DOCKER_BUILD_SETTINGS = {\n # Mapping of build.os options to docker image.\n \"os\": {\n \"ubuntu-20.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04\",\n \"ubuntu-22.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04\",\n },\n # Mapping of build.tools options to specific versions.\n \"tools\": {\n \"python\": {\n \"2.7\": \"2.7.18\",\n \"3.6\": \"3.6.15\",\n \"3.7\": \"3.7.17\",\n \"3.8\": \"3.8.17\",\n \"3.9\": \"3.9.17\",\n \"3.10\": \"3.10.12\",\n \"3.11\": \"3.11.4\",\n # Always point to the latest stable release.\n \"3\": \"3.11.4\",\n \"miniconda3-4.7\": \"miniconda3-4.7.12\",\n \"mambaforge-4.10\": \"mambaforge-4.10.3-10\",\n },\n \"nodejs\": {\n \"14\": \"14.20.1\",\n \"16\": \"16.18.1\",\n \"18\": \"18.16.1\", # LTS\n \"19\": \"19.0.1\",\n \"20\": \"20.3.1\",\n },\n \"rust\": {\n \"1.55\": \"1.55.0\",\n \"1.61\": \"1.61.0\",\n \"1.64\": \"1.64.0\",\n \"1.70\": \"1.70.0\",\n },\n \"golang\": {\n \"1.17\": \"1.17.13\",\n \"1.18\": \"1.18.10\",\n \"1.19\": \"1.19.10\",\n \"1.20\": \"1.20.5\",\n },\n },\n}\n", "path": "readthedocs/builds/constants_docker.py"}], "after_files": [{"content": "\"\"\"\nDefine constants here to allow import them without any external dependency.\n\nThere are situations where we want to have access to these values without Django 
installed\n(e.g. common/dockerfiles/tasks.py)\n\nNote these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.\n\"\"\"\n\nDOCKER_DEFAULT_IMAGE = \"readthedocs/build\"\n\n# Adding a new tool/version to this setting requires:\n#\n# - a mapping between the expected version in the config file, to the full\n# version installed via asdf (found via ``asdf list all <tool>``)\n#\n# - running the script ``./scripts/compile_version_upload.sh`` in\n# development and production environments to compile and cache the new\n# tool/version\n#\n# Note that when updating this options, you should also update the file:\n# readthedocs/rtd_tests/fixtures/spec/v2/schema.json\nRTD_DOCKER_BUILD_SETTINGS = {\n # Mapping of build.os options to docker image.\n \"os\": {\n \"ubuntu-20.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04\",\n \"ubuntu-22.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04\",\n },\n # Mapping of build.tools options to specific versions.\n \"tools\": {\n \"python\": {\n \"2.7\": \"2.7.18\",\n \"3.6\": \"3.6.15\",\n \"3.7\": \"3.7.17\",\n \"3.8\": \"3.8.17\",\n \"3.9\": \"3.9.17\",\n \"3.10\": \"3.10.12\",\n \"3.11\": \"3.11.4\",\n # Always point to the latest stable release.\n \"3\": \"3.11.4\",\n \"miniconda3-4.7\": \"miniconda3-4.7.12\",\n \"mambaforge-4.10\": \"mambaforge-4.10.3-10\",\n \"mambaforge-22.9\": \"mambaforge-22.9.0-3\",\n },\n \"nodejs\": {\n \"14\": \"14.20.1\",\n \"16\": \"16.18.1\",\n \"18\": \"18.16.1\", # LTS\n \"19\": \"19.0.1\",\n \"20\": \"20.3.1\",\n },\n \"rust\": {\n \"1.55\": \"1.55.0\",\n \"1.61\": \"1.61.0\",\n \"1.64\": \"1.64.0\",\n \"1.70\": \"1.70.0\",\n },\n \"golang\": {\n \"1.17\": \"1.17.13\",\n \"1.18\": \"1.18.10\",\n \"1.19\": \"1.19.10\",\n \"1.20\": \"1.20.5\",\n },\n },\n}\n", "path": "readthedocs/builds/constants_docker.py"}]}
1331
170
gh_patches_debug_18135
rasdani/github-patches
git_diff
streamlink__streamlink-3484
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Turkuvaz Plugin missing Streams Hi, first of all to be sure installed Streamlink 2.0.0 via Python3 again. After that tested all streams one by one with the turkuvaz.py Most of them are working, only 2 of 9 channels missing, "error: No plugin can handle URL" A2 and A Haber TV: https://www.atv.com.tr/a2tv/canli-yayin https://www.ahaber.com.tr/video/canli-yayin --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/streamlink/plugins/turkuvaz.py` Content: ``` 1 import logging 2 import re 3 4 from streamlink.plugin import Plugin 5 from streamlink.plugin.api import useragents, validate 6 from streamlink.stream import HLSStream 7 8 log = logging.getLogger(__name__) 9 10 11 class Turkuvaz(Plugin): 12 """ 13 Plugin to support ATV/A2TV Live streams from www.atv.com.tr and www.a2tv.com.tr 14 """ 15 16 _url_re = re.compile(r"""(?x)https?://(?:www\.)? 17 (?: 18 (?: 19 (atvavrupa)\.tv 20 | 21 (atv|a2tv|ahaber|aspor|minikago|minikacocuk|anews)\.com\.tr 22 )/webtv/(?:live-broadcast|canli-yayin) 23 | 24 sabah\.com\.tr/(apara)/canli-yayin 25 )""") 26 _hls_url = "https://trkvz-live.ercdn.net/{channel}/{channel}.m3u8" 27 _token_url = "https://securevideotoken.tmgrup.com.tr/webtv/secure" 28 _token_schema = validate.Schema(validate.all( 29 { 30 "Success": True, 31 "Url": validate.url(), 32 }, 33 validate.get("Url")) 34 ) 35 36 @classmethod 37 def can_handle_url(cls, url): 38 return cls._url_re.match(url) is not None 39 40 def _get_streams(self): 41 url_m = self._url_re.match(self.url) 42 domain = url_m.group(1) or url_m.group(2) or url_m.group(3) 43 # remap the domain to channel 44 channel = {"atv": "atvhd", 45 "ahaber": "ahaberhd", 46 "apara": "aparahd", 47 "aspor": "asporhd", 48 "anews": "anewshd", 49 "minikacocuk": "minikagococuk"}.get(domain, domain) 50 hls_url = self._hls_url.format(channel=channel) 51 # get the secure HLS URL 52 res = self.session.http.get(self._token_url, 53 params="url={0}".format(hls_url), 54 headers={"Referer": self.url, 55 "User-Agent": useragents.CHROME}) 56 57 secure_hls_url = self.session.http.json(res, schema=self._token_schema) 58 59 log.debug("Found HLS URL: {0}".format(secure_hls_url)) 60 return HLSStream.parse_variant_playlist(self.session, secure_hls_url) 61 62 63 __plugin__ = Turkuvaz 64 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/streamlink/plugins/turkuvaz.py b/src/streamlink/plugins/turkuvaz.py --- a/src/streamlink/plugins/turkuvaz.py +++ b/src/streamlink/plugins/turkuvaz.py @@ -20,6 +20,10 @@ | (atv|a2tv|ahaber|aspor|minikago|minikacocuk|anews)\.com\.tr )/webtv/(?:live-broadcast|canli-yayin) + | + (ahaber)\.com\.tr/video/canli-yayin + | + atv\.com\.tr/(a2tv)/canli-yayin | sabah\.com\.tr/(apara)/canli-yayin )""") @@ -39,7 +43,7 @@ def _get_streams(self): url_m = self._url_re.match(self.url) - domain = url_m.group(1) or url_m.group(2) or url_m.group(3) + domain = url_m.group(1) or url_m.group(2) or url_m.group(3) or url_m.group(4) or url_m.group(5) # remap the domain to channel channel = {"atv": "atvhd", "ahaber": "ahaberhd",
{"golden_diff": "diff --git a/src/streamlink/plugins/turkuvaz.py b/src/streamlink/plugins/turkuvaz.py\n--- a/src/streamlink/plugins/turkuvaz.py\n+++ b/src/streamlink/plugins/turkuvaz.py\n@@ -20,6 +20,10 @@\n |\n (atv|a2tv|ahaber|aspor|minikago|minikacocuk|anews)\\.com\\.tr\n )/webtv/(?:live-broadcast|canli-yayin)\n+ |\n+ (ahaber)\\.com\\.tr/video/canli-yayin\n+ |\n+ atv\\.com\\.tr/(a2tv)/canli-yayin\n |\n sabah\\.com\\.tr/(apara)/canli-yayin\n )\"\"\")\n@@ -39,7 +43,7 @@\n \n def _get_streams(self):\n url_m = self._url_re.match(self.url)\n- domain = url_m.group(1) or url_m.group(2) or url_m.group(3)\n+ domain = url_m.group(1) or url_m.group(2) or url_m.group(3) or url_m.group(4) or url_m.group(5)\n # remap the domain to channel\n channel = {\"atv\": \"atvhd\",\n \"ahaber\": \"ahaberhd\",\n", "issue": "Turkuvaz Plugin missing Streams\nHi,\r\n\r\nfirst of all to be sure installed Streamlink 2.0.0 via Python3 again.\r\n\r\nAfter that tested all streams one by one with the turkuvaz.py\r\n\r\nMost of them are working, only 2 of 9 channels missing, \"error: No plugin can handle URL\"\r\n\r\nA2 and A Haber TV:\r\n\r\nhttps://www.atv.com.tr/a2tv/canli-yayin\r\nhttps://www.ahaber.com.tr/video/canli-yayin\r\n\r\n\n", "before_files": [{"content": "import logging\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.stream import HLSStream\n\nlog = logging.getLogger(__name__)\n\n\nclass Turkuvaz(Plugin):\n \"\"\"\n Plugin to support ATV/A2TV Live streams from www.atv.com.tr and www.a2tv.com.tr\n \"\"\"\n\n _url_re = re.compile(r\"\"\"(?x)https?://(?:www\\.)?\n (?:\n (?:\n (atvavrupa)\\.tv\n |\n (atv|a2tv|ahaber|aspor|minikago|minikacocuk|anews)\\.com\\.tr\n )/webtv/(?:live-broadcast|canli-yayin)\n |\n sabah\\.com\\.tr/(apara)/canli-yayin\n )\"\"\")\n _hls_url = \"https://trkvz-live.ercdn.net/{channel}/{channel}.m3u8\"\n _token_url = \"https://securevideotoken.tmgrup.com.tr/webtv/secure\"\n _token_schema = validate.Schema(validate.all(\n {\n \"Success\": True,\n \"Url\": validate.url(),\n },\n validate.get(\"Url\"))\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls._url_re.match(url) is not None\n\n def _get_streams(self):\n url_m = self._url_re.match(self.url)\n domain = url_m.group(1) or url_m.group(2) or url_m.group(3)\n # remap the domain to channel\n channel = {\"atv\": \"atvhd\",\n \"ahaber\": \"ahaberhd\",\n \"apara\": \"aparahd\",\n \"aspor\": \"asporhd\",\n \"anews\": \"anewshd\",\n \"minikacocuk\": \"minikagococuk\"}.get(domain, domain)\n hls_url = self._hls_url.format(channel=channel)\n # get the secure HLS URL\n res = self.session.http.get(self._token_url,\n params=\"url={0}\".format(hls_url),\n headers={\"Referer\": self.url,\n \"User-Agent\": useragents.CHROME})\n\n secure_hls_url = self.session.http.json(res, schema=self._token_schema)\n\n log.debug(\"Found HLS URL: {0}\".format(secure_hls_url))\n return HLSStream.parse_variant_playlist(self.session, secure_hls_url)\n\n\n__plugin__ = Turkuvaz\n", "path": "src/streamlink/plugins/turkuvaz.py"}], "after_files": [{"content": "import logging\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.stream import HLSStream\n\nlog = logging.getLogger(__name__)\n\n\nclass Turkuvaz(Plugin):\n \"\"\"\n Plugin to support ATV/A2TV Live streams from www.atv.com.tr and www.a2tv.com.tr\n \"\"\"\n\n _url_re = re.compile(r\"\"\"(?x)https?://(?:www\\.)?\n (?:\n (?:\n (atvavrupa)\\.tv\n |\n 
(atv|a2tv|ahaber|aspor|minikago|minikacocuk|anews)\\.com\\.tr\n )/webtv/(?:live-broadcast|canli-yayin)\n |\n (ahaber)\\.com\\.tr/video/canli-yayin\n |\n atv\\.com\\.tr/(a2tv)/canli-yayin\n |\n sabah\\.com\\.tr/(apara)/canli-yayin\n )\"\"\")\n _hls_url = \"https://trkvz-live.ercdn.net/{channel}/{channel}.m3u8\"\n _token_url = \"https://securevideotoken.tmgrup.com.tr/webtv/secure\"\n _token_schema = validate.Schema(validate.all(\n {\n \"Success\": True,\n \"Url\": validate.url(),\n },\n validate.get(\"Url\"))\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls._url_re.match(url) is not None\n\n def _get_streams(self):\n url_m = self._url_re.match(self.url)\n domain = url_m.group(1) or url_m.group(2) or url_m.group(3) or url_m.group(4) or url_m.group(5)\n # remap the domain to channel\n channel = {\"atv\": \"atvhd\",\n \"ahaber\": \"ahaberhd\",\n \"apara\": \"aparahd\",\n \"aspor\": \"asporhd\",\n \"anews\": \"anewshd\",\n \"minikacocuk\": \"minikagococuk\"}.get(domain, domain)\n hls_url = self._hls_url.format(channel=channel)\n # get the secure HLS URL\n res = self.session.http.get(self._token_url,\n params=\"url={0}\".format(hls_url),\n headers={\"Referer\": self.url,\n \"User-Agent\": useragents.CHROME})\n\n secure_hls_url = self.session.http.json(res, schema=self._token_schema)\n\n log.debug(\"Found HLS URL: {0}\".format(secure_hls_url))\n return HLSStream.parse_variant_playlist(self.session, secure_hls_url)\n\n\n__plugin__ = Turkuvaz\n", "path": "src/streamlink/plugins/turkuvaz.py"}]}
1051
305
gh_patches_debug_30776
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-223
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Marshall's --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/marshalls.py` Content: ``` 1 import json 2 import re 3 import scrapy 4 from locations.items import GeojsonPointItem 5 6 STATES = ["AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DC", "DE", "FL", "GA", 7 "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD", 8 "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ", 9 "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC", 10 "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY"] 11 12 DAYS = {'Mon': 'Mo', 'Tue': 'Tu', 13 'Wed': 'We', 'Thu': 'Th', 14 'Fri': 'Fr', 'Sat': 'Sa', 15 'Sun': 'Su'} 16 17 URL = 'https://mktsvc.tjx.com/storelocator/GetSearchResultsByState' 18 19 20 def normalize_time(hours): 21 22 if not hours: 23 return '' 24 25 day_times = hours.split(',') 26 normalize_day_times = [] 27 28 for day_time in day_times: 29 day, hours = [x.strip() for x in day_time.split(': ')] 30 normalize_hours = [] 31 32 if re.search('-', day): 33 days = [x.strip() for x in day.split('-')] 34 norm_days = '-'.join([DAYS.get(x, '') for x in days]) 35 else: 36 norm_days = DAYS.get(day, '') 37 38 if re.search('CLOSED', hours): 39 norm_hours = ' off' 40 normalize_hours.append(norm_hours) 41 else: 42 if re.search('-', hours): 43 hours = [x.strip() for x in hours.split('-')] 44 45 for hour in hours: 46 47 if hour[-1] == 'p': 48 if re.search(':', hour[:-1]): 49 hora, minute = [x.strip() for x in hour[:-1].split(':')] 50 if int(hora) < 12: 51 norm_hours = str(int(hora) + 12) + ':' + minute 52 else: 53 if int(hour[:-1]) < 12: 54 norm_hours = str(int(hour[:-1]) + 12) + ":00" 55 56 elif hour[-1] == 'a': 57 if re.search(':', hour[:-1]): 58 hora, minute = [x.strip() for x in hour[:-1].split(':')] 59 norm_hours = hora + ':' + minute 60 else: 61 norm_hours = hour[:-1] + ":00" 62 63 normalize_hours.append(norm_hours) 64 65 normalize_day_times.append(' '.join([norm_days, '-'.join(normalize_hours)])) 66 return '; '.join(normalize_day_times) 67 68 69 class MarshallsSpider(scrapy.Spider): 70 71 name = "marshalls" 72 allowed_domains = ["mktsvc.tjx.com", 'www.marshallsonline.com'] 73 74 def start_requests(self): 75 url = URL 76 77 headers = { 78 'Accept-Language': 'en-US,en;q=0.8,ru;q=0.6', 79 'Origin': 'https://www.marshallsonline.com', 80 'Accept-Encoding': 'gzip, deflate, br', 81 'Accept': 'application/json, text/plain, */*', 82 'Referer': 'https://www.marshallsonline.com/store-finder/by-state', 83 'Connection': 'keep-alive', 84 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', 85 } 86 87 for state in STATES: 88 form_data = {'chain': '10', 'lang': 'en', 'state': state} 89 90 yield scrapy.http.FormRequest(url=url, method='POST', formdata=form_data, 91 headers=headers, callback=self.parse) 92 93 def parse(self, response): 94 95 data = json.loads(response.body_as_unicode()) 96 stores = data.get('Stores', None) 97 98 for store in stores: 99 lon_lat = [store.pop('Longitude', None), store.pop('Latitude', None)] 100 store['ref'] = URL + str(store.get('StoreID', None)) 101 102 opening_hours = normalize_time(store.get('Hours', '')) 103 104 if opening_hours: 105 store['opening_hours'] = opening_hours 106 store.pop('Hours', None) 107 108 yield GeojsonPointItem( 109 properties=store, 110 lon_lat=lon_lat 111 ) 112 ``` --- END FILES --- Please first localize the bug 
based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/marshalls.py b/locations/spiders/marshalls.py --- a/locations/spiders/marshalls.py +++ b/locations/spiders/marshalls.py @@ -17,10 +17,20 @@ URL = 'https://mktsvc.tjx.com/storelocator/GetSearchResultsByState' +NORMALIZE_KEYS = ( + ('addr:full', ['Address', 'Address2']), + ('addr:city', ['City']), + ('addr:state', ['State']), + ('addr:postcode', ['Zip']), + ('addr:country', ['Country']), + ('phone', ['Phone']), + ) + + def normalize_time(hours): if not hours: - return '' + return '' day_times = hours.split(',') normalize_day_times = [] @@ -94,18 +104,23 @@ data = json.loads(response.body_as_unicode()) stores = data.get('Stores', None) + props = {} for store in stores: - lon_lat = [store.pop('Longitude', None), store.pop('Latitude', None)] - store['ref'] = URL + str(store.get('StoreID', None)) + lon_lat = [store.pop('Longitude', ''), store.pop('Latitude', None)] + props['ref'] = store.pop('StoreID', None) + props['website'] = URL + + for new_key, old_keys in NORMALIZE_KEYS: + props[new_key] = ", ".join([store.pop(key, '').strip() for key in old_keys if store[key]]) - opening_hours = normalize_time(store.get('Hours', '')) + opening_hours = normalize_time(store.pop('Hours', '')) if opening_hours: - store['opening_hours'] = opening_hours - store.pop('Hours', None) + props['opening_hours'] = opening_hours + props.pop('Hours', None) yield GeojsonPointItem( - properties=store, + properties=props, lon_lat=lon_lat )
{"golden_diff": "diff --git a/locations/spiders/marshalls.py b/locations/spiders/marshalls.py\n--- a/locations/spiders/marshalls.py\n+++ b/locations/spiders/marshalls.py\n@@ -17,10 +17,20 @@\n URL = 'https://mktsvc.tjx.com/storelocator/GetSearchResultsByState'\n \n \n+NORMALIZE_KEYS = (\n+ ('addr:full', ['Address', 'Address2']),\n+ ('addr:city', ['City']),\n+ ('addr:state', ['State']),\n+ ('addr:postcode', ['Zip']),\n+ ('addr:country', ['Country']),\n+ ('phone', ['Phone']),\n+ )\n+\n+\n def normalize_time(hours):\n \n if not hours:\n- return ''\n+ return ''\n \n day_times = hours.split(',')\n normalize_day_times = []\n@@ -94,18 +104,23 @@\n \n data = json.loads(response.body_as_unicode())\n stores = data.get('Stores', None)\n+ props = {}\n \n for store in stores:\n- lon_lat = [store.pop('Longitude', None), store.pop('Latitude', None)]\n- store['ref'] = URL + str(store.get('StoreID', None))\n+ lon_lat = [store.pop('Longitude', ''), store.pop('Latitude', None)]\n+ props['ref'] = store.pop('StoreID', None)\n+ props['website'] = URL\n+\n+ for new_key, old_keys in NORMALIZE_KEYS:\n+ props[new_key] = \", \".join([store.pop(key, '').strip() for key in old_keys if store[key]])\n \n- opening_hours = normalize_time(store.get('Hours', ''))\n+ opening_hours = normalize_time(store.pop('Hours', ''))\n \n if opening_hours:\n- store['opening_hours'] = opening_hours\n- store.pop('Hours', None)\n+ props['opening_hours'] = opening_hours\n+ props.pop('Hours', None)\n \n yield GeojsonPointItem(\n- properties=store,\n+ properties=props,\n lon_lat=lon_lat\n )\n", "issue": "Marshall's\n\n", "before_files": [{"content": "import json\nimport re\nimport scrapy\nfrom locations.items import GeojsonPointItem\n\nSTATES = [\"AL\", \"AK\", \"AZ\", \"AR\", \"CA\", \"CO\", \"CT\", \"DC\", \"DE\", \"FL\", \"GA\",\n \"HI\", \"ID\", \"IL\", \"IN\", \"IA\", \"KS\", \"KY\", \"LA\", \"ME\", \"MD\",\n \"MA\", \"MI\", \"MN\", \"MS\", \"MO\", \"MT\", \"NE\", \"NV\", \"NH\", \"NJ\",\n \"NM\", \"NY\", \"NC\", \"ND\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\", \"SC\",\n \"SD\", \"TN\", \"TX\", \"UT\", \"VT\", \"VA\", \"WA\", \"WV\", \"WI\", \"WY\"]\n\nDAYS = {'Mon': 'Mo', 'Tue': 'Tu',\n 'Wed': 'We', 'Thu': 'Th',\n 'Fri': 'Fr', 'Sat': 'Sa',\n 'Sun': 'Su'}\n\nURL = 'https://mktsvc.tjx.com/storelocator/GetSearchResultsByState'\n\n\ndef normalize_time(hours):\n\n if not hours:\n return ''\n\n day_times = hours.split(',')\n normalize_day_times = []\n\n for day_time in day_times:\n day, hours = [x.strip() for x in day_time.split(': ')]\n normalize_hours = []\n\n if re.search('-', day):\n days = [x.strip() for x in day.split('-')]\n norm_days = '-'.join([DAYS.get(x, '') for x in days])\n else:\n norm_days = DAYS.get(day, '')\n\n if re.search('CLOSED', hours):\n norm_hours = ' off'\n normalize_hours.append(norm_hours)\n else:\n if re.search('-', hours):\n hours = [x.strip() for x in hours.split('-')]\n\n for hour in hours:\n\n if hour[-1] == 'p':\n if re.search(':', hour[:-1]):\n hora, minute = [x.strip() for x in hour[:-1].split(':')]\n if int(hora) < 12:\n norm_hours = str(int(hora) + 12) + ':' + minute\n else:\n if int(hour[:-1]) < 12:\n norm_hours = str(int(hour[:-1]) + 12) + \":00\"\n\n elif hour[-1] == 'a':\n if re.search(':', hour[:-1]):\n hora, minute = [x.strip() for x in hour[:-1].split(':')]\n norm_hours = hora + ':' + minute\n else:\n norm_hours = hour[:-1] + \":00\"\n\n normalize_hours.append(norm_hours)\n\n normalize_day_times.append(' '.join([norm_days, '-'.join(normalize_hours)]))\n return '; '.join(normalize_day_times)\n\n\nclass 
MarshallsSpider(scrapy.Spider):\n\n name = \"marshalls\"\n allowed_domains = [\"mktsvc.tjx.com\", 'www.marshallsonline.com']\n\n def start_requests(self):\n url = URL\n\n headers = {\n 'Accept-Language': 'en-US,en;q=0.8,ru;q=0.6',\n 'Origin': 'https://www.marshallsonline.com',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept': 'application/json, text/plain, */*',\n 'Referer': 'https://www.marshallsonline.com/store-finder/by-state',\n 'Connection': 'keep-alive',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n }\n\n for state in STATES:\n form_data = {'chain': '10', 'lang': 'en', 'state': state}\n\n yield scrapy.http.FormRequest(url=url, method='POST', formdata=form_data,\n headers=headers, callback=self.parse)\n\n def parse(self, response):\n\n data = json.loads(response.body_as_unicode())\n stores = data.get('Stores', None)\n\n for store in stores:\n lon_lat = [store.pop('Longitude', None), store.pop('Latitude', None)]\n store['ref'] = URL + str(store.get('StoreID', None))\n\n opening_hours = normalize_time(store.get('Hours', ''))\n\n if opening_hours:\n store['opening_hours'] = opening_hours\n store.pop('Hours', None)\n\n yield GeojsonPointItem(\n properties=store,\n lon_lat=lon_lat\n )\n", "path": "locations/spiders/marshalls.py"}], "after_files": [{"content": "import json\nimport re\nimport scrapy\nfrom locations.items import GeojsonPointItem\n\nSTATES = [\"AL\", \"AK\", \"AZ\", \"AR\", \"CA\", \"CO\", \"CT\", \"DC\", \"DE\", \"FL\", \"GA\",\n \"HI\", \"ID\", \"IL\", \"IN\", \"IA\", \"KS\", \"KY\", \"LA\", \"ME\", \"MD\",\n \"MA\", \"MI\", \"MN\", \"MS\", \"MO\", \"MT\", \"NE\", \"NV\", \"NH\", \"NJ\",\n \"NM\", \"NY\", \"NC\", \"ND\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\", \"SC\",\n \"SD\", \"TN\", \"TX\", \"UT\", \"VT\", \"VA\", \"WA\", \"WV\", \"WI\", \"WY\"]\n\nDAYS = {'Mon': 'Mo', 'Tue': 'Tu',\n 'Wed': 'We', 'Thu': 'Th',\n 'Fri': 'Fr', 'Sat': 'Sa',\n 'Sun': 'Su'}\n\nURL = 'https://mktsvc.tjx.com/storelocator/GetSearchResultsByState'\n\n\nNORMALIZE_KEYS = (\n ('addr:full', ['Address', 'Address2']),\n ('addr:city', ['City']),\n ('addr:state', ['State']),\n ('addr:postcode', ['Zip']),\n ('addr:country', ['Country']),\n ('phone', ['Phone']),\n )\n\n\ndef normalize_time(hours):\n\n if not hours:\n return ''\n\n day_times = hours.split(',')\n normalize_day_times = []\n\n for day_time in day_times:\n day, hours = [x.strip() for x in day_time.split(': ')]\n normalize_hours = []\n\n if re.search('-', day):\n days = [x.strip() for x in day.split('-')]\n norm_days = '-'.join([DAYS.get(x, '') for x in days])\n else:\n norm_days = DAYS.get(day, '')\n\n if re.search('CLOSED', hours):\n norm_hours = ' off'\n normalize_hours.append(norm_hours)\n else:\n if re.search('-', hours):\n hours = [x.strip() for x in hours.split('-')]\n\n for hour in hours:\n\n if hour[-1] == 'p':\n if re.search(':', hour[:-1]):\n hora, minute = [x.strip() for x in hour[:-1].split(':')]\n if int(hora) < 12:\n norm_hours = str(int(hora) + 12) + ':' + minute\n else:\n if int(hour[:-1]) < 12:\n norm_hours = str(int(hour[:-1]) + 12) + \":00\"\n\n elif hour[-1] == 'a':\n if re.search(':', hour[:-1]):\n hora, minute = [x.strip() for x in hour[:-1].split(':')]\n norm_hours = hora + ':' + minute\n else:\n norm_hours = hour[:-1] + \":00\"\n\n normalize_hours.append(norm_hours)\n\n normalize_day_times.append(' '.join([norm_days, '-'.join(normalize_hours)]))\n return '; '.join(normalize_day_times)\n\n\nclass MarshallsSpider(scrapy.Spider):\n\n name = \"marshalls\"\n allowed_domains = [\"mktsvc.tjx.com\", 
'www.marshallsonline.com']\n\n def start_requests(self):\n url = URL\n\n headers = {\n 'Accept-Language': 'en-US,en;q=0.8,ru;q=0.6',\n 'Origin': 'https://www.marshallsonline.com',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept': 'application/json, text/plain, */*',\n 'Referer': 'https://www.marshallsonline.com/store-finder/by-state',\n 'Connection': 'keep-alive',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n }\n\n for state in STATES:\n form_data = {'chain': '10', 'lang': 'en', 'state': state}\n\n yield scrapy.http.FormRequest(url=url, method='POST', formdata=form_data,\n headers=headers, callback=self.parse)\n\n def parse(self, response):\n\n data = json.loads(response.body_as_unicode())\n stores = data.get('Stores', None)\n props = {}\n\n for store in stores:\n lon_lat = [store.pop('Longitude', ''), store.pop('Latitude', None)]\n props['ref'] = store.pop('StoreID', None)\n props['website'] = URL\n\n for new_key, old_keys in NORMALIZE_KEYS:\n props[new_key] = \", \".join([store.pop(key, '').strip() for key in old_keys if store[key]])\n\n opening_hours = normalize_time(store.pop('Hours', ''))\n\n if opening_hours:\n props['opening_hours'] = opening_hours\n props.pop('Hours', None)\n\n yield GeojsonPointItem(\n properties=props,\n lon_lat=lon_lat\n )\n", "path": "locations/spiders/marshalls.py"}]}
1478
464
gh_patches_debug_11984
rasdani/github-patches
git_diff
dotkom__onlineweb4-420
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Mark rules error for anonymous users on events 'AnonymousUser' object has no attribute 'mark_rules' --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `apps/events/forms.py` Content: ``` 1 from django import forms 2 from captcha.fields import CaptchaField 3 4 class CaptchaForm(forms.Form): 5 def __init__(self, *args, **kwargs): 6 user = kwargs.pop('user', None) 7 super(CaptchaForm, self).__init__(*args, **kwargs) 8 # Removing mark rules field if user has already accepted the rules 9 if user and user.mark_rules: 10 del self.fields['mark_rules'] 11 mark_rules = forms.BooleanField(label=u'Jeg godtar <a href="/profile/#marks" target="_blank">prikkreglene</a>') 12 captcha = CaptchaField() 13 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/apps/events/forms.py b/apps/events/forms.py --- a/apps/events/forms.py +++ b/apps/events/forms.py @@ -6,7 +6,7 @@ user = kwargs.pop('user', None) super(CaptchaForm, self).__init__(*args, **kwargs) # Removing mark rules field if user has already accepted the rules - if user and user.mark_rules: + if user and user.is_authenticated() and user.mark_rules: del self.fields['mark_rules'] mark_rules = forms.BooleanField(label=u'Jeg godtar <a href="/profile/#marks" target="_blank">prikkreglene</a>') captcha = CaptchaField()
{"golden_diff": "diff --git a/apps/events/forms.py b/apps/events/forms.py\n--- a/apps/events/forms.py\n+++ b/apps/events/forms.py\n@@ -6,7 +6,7 @@\n user = kwargs.pop('user', None)\n super(CaptchaForm, self).__init__(*args, **kwargs)\n # Removing mark rules field if user has already accepted the rules\n- if user and user.mark_rules:\n+ if user and user.is_authenticated() and user.mark_rules:\n del self.fields['mark_rules']\n mark_rules = forms.BooleanField(label=u'Jeg godtar <a href=\"/profile/#marks\" target=\"_blank\">prikkreglene</a>')\n captcha = CaptchaField()\n", "issue": "Mark rules error for anonymous users on events\n'AnonymousUser' object has no attribute 'mark_rules'\n\n", "before_files": [{"content": "from django import forms\nfrom captcha.fields import CaptchaField\n\nclass CaptchaForm(forms.Form):\n def __init__(self, *args, **kwargs):\n user = kwargs.pop('user', None)\n super(CaptchaForm, self).__init__(*args, **kwargs)\n # Removing mark rules field if user has already accepted the rules\n if user and user.mark_rules:\n del self.fields['mark_rules']\n mark_rules = forms.BooleanField(label=u'Jeg godtar <a href=\"/profile/#marks\" target=\"_blank\">prikkreglene</a>')\n captcha = CaptchaField()\n", "path": "apps/events/forms.py"}], "after_files": [{"content": "from django import forms\nfrom captcha.fields import CaptchaField\n\nclass CaptchaForm(forms.Form):\n def __init__(self, *args, **kwargs):\n user = kwargs.pop('user', None)\n super(CaptchaForm, self).__init__(*args, **kwargs)\n # Removing mark rules field if user has already accepted the rules\n if user and user.is_authenticated() and user.mark_rules:\n del self.fields['mark_rules']\n mark_rules = forms.BooleanField(label=u'Jeg godtar <a href=\"/profile/#marks\" target=\"_blank\">prikkreglene</a>')\n captcha = CaptchaField()\n", "path": "apps/events/forms.py"}]}
425
148
gh_patches_debug_219
rasdani/github-patches
git_diff
pypa__setuptools-2427
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Sphinx setup should be stricter I noticed that some of the docs pages are unreachable when navigating from the main RTD page. In particular, _I know_ that there's `history.rst` that is only accessible if one knows the URL upfront. I tracked this to https://github.com/pypa/setuptools/pull/2097 which removes entries from the TOC but doesn't reintroduce them in other places. Sphinx has a few toggles that make it nitpicky about warnings. I think this should be enabled in the CI to prevent such problems in the future. This should catch implicit orphan pages as well as dead references or typos. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/conf.py` Content: ``` 1 import subprocess 2 import sys 3 import os 4 5 6 # hack to run the bootstrap script so that jaraco.packaging.sphinx 7 # can invoke setup.py 8 'READTHEDOCS' in os.environ and subprocess.check_call( 9 [sys.executable, '-m', 'bootstrap'], 10 cwd=os.path.join(os.path.dirname(__file__), os.path.pardir), 11 ) 12 13 # -- Project information ----------------------------------------------------- 14 15 github_url = 'https://github.com' 16 github_sponsors_url = f'{github_url}/sponsors' 17 18 # -- General configuration -- 19 20 extensions = [ 21 'sphinx.ext.extlinks', # allows to create custom roles easily 22 'jaraco.packaging.sphinx', 23 'rst.linker', 24 ] 25 26 # Add any paths that contain templates here, relative to this directory. 27 templates_path = ['_templates'] 28 29 # The master toctree document. 30 master_doc = 'index' 31 32 # List of directories, relative to source directory, that shouldn't be searched 33 # for source files. 34 exclude_trees = [] 35 36 # The name of the Pygments (syntax highlighting) style to use. 37 pygments_style = 'sphinx' 38 39 # -- Options for extlinks extension --------------------------------------- 40 extlinks = { 41 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323 42 } 43 44 # -- Options for HTML output -- 45 46 # The theme to use for HTML and HTML Help pages. Major themes that come with 47 # Sphinx are currently 'default' and 'sphinxdoc'. 48 html_theme = 'nature' 49 50 # Add any paths that contain custom themes here, relative to this directory. 51 html_theme_path = ['_theme'] 52 53 # If true, SmartyPants will be used to convert quotes and dashes to 54 # typographically correct entities. 55 html_use_smartypants = True 56 57 # Custom sidebar templates, maps document names to template names. 58 html_sidebars = { 59 'index': [ 60 'relations.html', 'sourcelink.html', 'indexsidebar.html', 61 'searchbox.html']} 62 63 # If false, no module index is generated. 64 html_use_modindex = False 65 66 # If false, no index is generated. 67 html_use_index = False 68 69 # -- Options for LaTeX output -- 70 71 # Grouping the document tree into LaTeX files. List of tuples 72 # (source start file, target name, title, author, 73 # documentclass [howto/manual]). 
74 latex_documents = [( 75 'index', 'Setuptools.tex', 'Setuptools Documentation', 76 'The fellowship of the packaging', 'manual', 77 )] 78 79 link_files = { 80 '../CHANGES.rst': dict( 81 using=dict( 82 BB='https://bitbucket.org', 83 GH='https://github.com', 84 ), 85 replace=[ 86 dict( 87 pattern=r'(Issue )?#(?P<issue>\d+)', 88 url='{package_url}/issues/{issue}', 89 ), 90 dict( 91 pattern=r'BB Pull Request ?#(?P<bb_pull_request>\d+)', 92 url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}', 93 ), 94 dict( 95 pattern=r'Distribute #(?P<distribute>\d+)', 96 url='{BB}/tarek/distribute/issue/{distribute}', 97 ), 98 dict( 99 pattern=r'Buildout #(?P<buildout>\d+)', 100 url='{GH}/buildout/buildout/issues/{buildout}', 101 ), 102 dict( 103 pattern=r'Old Setuptools #(?P<old_setuptools>\d+)', 104 url='http://bugs.python.org/setuptools/issue{old_setuptools}', 105 ), 106 dict( 107 pattern=r'Jython #(?P<jython>\d+)', 108 url='http://bugs.jython.org/issue{jython}', 109 ), 110 dict( 111 pattern=r'(Python #|bpo-)(?P<python>\d+)', 112 url='http://bugs.python.org/issue{python}', 113 ), 114 dict( 115 pattern=r'Interop #(?P<interop>\d+)', 116 url='{GH}/pypa/interoperability-peps/issues/{interop}', 117 ), 118 dict( 119 pattern=r'Pip #(?P<pip>\d+)', 120 url='{GH}/pypa/pip/issues/{pip}', 121 ), 122 dict( 123 pattern=r'Packaging #(?P<packaging>\d+)', 124 url='{GH}/pypa/packaging/issues/{packaging}', 125 ), 126 dict( 127 pattern=r'[Pp]ackaging (?P<packaging_ver>\d+(\.\d+)+)', 128 url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst', 129 ), 130 dict( 131 pattern=r'PEP[- ](?P<pep_number>\d+)', 132 url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/', 133 ), 134 dict( 135 pattern=r'setuptools_svn #(?P<setuptools_svn>\d+)', 136 url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}', 137 ), 138 dict( 139 pattern=r'pypa/distutils#(?P<distutils>\d+)', 140 url='{GH}/pypa/distutils/issues/{distutils}', 141 ), 142 dict( 143 pattern=r'^(?m)((?P<scm_version>v?\d+(\.\d+){1,2}))\n[-=]+\n', 144 with_scm='{text}\n{rev[timestamp]:%d %b %Y}\n', 145 ), 146 ], 147 ), 148 } 149 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -146,3 +146,12 @@ ], ), } + + +# Be strict about any broken references: +nitpicky = True + + +# Ref: https://github.com/python-attrs/attrs/pull/571/files\ +# #diff-85987f48f1258d9ee486e3191495582dR82 +default_role = 'any'
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -146,3 +146,12 @@\n ],\n ),\n }\n+\n+\n+# Be strict about any broken references:\n+nitpicky = True\n+\n+\n+# Ref: https://github.com/python-attrs/attrs/pull/571/files\\\n+# #diff-85987f48f1258d9ee486e3191495582dR82\n+default_role = 'any'\n", "issue": "Sphinx setup should be stricter\nI noticed that some of the docs pages are unreachable when navigating from the main RTD page. In particular, _I know_ that there's `history.rst` that is only accessible if one knows the URL upfront.\r\n\r\nI tracked this to https://github.com/pypa/setuptools/pull/2097 which removes entries from the TOC but doesn't reintroduce them in other places.\r\n\r\nSphinx has a few toggles that make it nitpicky about warnings. I think this should be enabled in the CI to prevent such problems in the future. This should catch implicit orphan pages as well as dead references or typos.\n", "before_files": [{"content": "import subprocess\nimport sys\nimport os\n\n\n# hack to run the bootstrap script so that jaraco.packaging.sphinx\n# can invoke setup.py\n'READTHEDOCS' in os.environ and subprocess.check_call(\n [sys.executable, '-m', 'bootstrap'],\n cwd=os.path.join(os.path.dirname(__file__), os.path.pardir),\n)\n\n# -- Project information -----------------------------------------------------\n\ngithub_url = 'https://github.com'\ngithub_sponsors_url = f'{github_url}/sponsors'\n\n# -- General configuration --\n\nextensions = [\n 'sphinx.ext.extlinks', # allows to create custom roles easily\n 'jaraco.packaging.sphinx',\n 'rst.linker',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for extlinks extension ---------------------------------------\nextlinks = {\n 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323\n}\n\n# -- Options for HTML output --\n\n# The theme to use for HTML and HTML Help pages. Major themes that come with\n# Sphinx are currently 'default' and 'sphinxdoc'.\nhtml_theme = 'nature'\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = ['_theme']\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\nhtml_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\nhtml_sidebars = {\n 'index': [\n 'relations.html', 'sourcelink.html', 'indexsidebar.html',\n 'searchbox.html']}\n\n# If false, no module index is generated.\nhtml_use_modindex = False\n\n# If false, no index is generated.\nhtml_use_index = False\n\n# -- Options for LaTeX output --\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author,\n# documentclass [howto/manual]).\nlatex_documents = [(\n 'index', 'Setuptools.tex', 'Setuptools Documentation',\n 'The fellowship of the packaging', 'manual',\n)]\n\nlink_files = {\n '../CHANGES.rst': dict(\n using=dict(\n BB='https://bitbucket.org',\n GH='https://github.com',\n ),\n replace=[\n dict(\n pattern=r'(Issue )?#(?P<issue>\\d+)',\n url='{package_url}/issues/{issue}',\n ),\n dict(\n pattern=r'BB Pull Request ?#(?P<bb_pull_request>\\d+)',\n url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',\n ),\n dict(\n pattern=r'Distribute #(?P<distribute>\\d+)',\n url='{BB}/tarek/distribute/issue/{distribute}',\n ),\n dict(\n pattern=r'Buildout #(?P<buildout>\\d+)',\n url='{GH}/buildout/buildout/issues/{buildout}',\n ),\n dict(\n pattern=r'Old Setuptools #(?P<old_setuptools>\\d+)',\n url='http://bugs.python.org/setuptools/issue{old_setuptools}',\n ),\n dict(\n pattern=r'Jython #(?P<jython>\\d+)',\n url='http://bugs.jython.org/issue{jython}',\n ),\n dict(\n pattern=r'(Python #|bpo-)(?P<python>\\d+)',\n url='http://bugs.python.org/issue{python}',\n ),\n dict(\n pattern=r'Interop #(?P<interop>\\d+)',\n url='{GH}/pypa/interoperability-peps/issues/{interop}',\n ),\n dict(\n pattern=r'Pip #(?P<pip>\\d+)',\n url='{GH}/pypa/pip/issues/{pip}',\n ),\n dict(\n pattern=r'Packaging #(?P<packaging>\\d+)',\n url='{GH}/pypa/packaging/issues/{packaging}',\n ),\n dict(\n pattern=r'[Pp]ackaging (?P<packaging_ver>\\d+(\\.\\d+)+)',\n url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',\n ),\n dict(\n pattern=r'PEP[- ](?P<pep_number>\\d+)',\n url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',\n ),\n dict(\n pattern=r'setuptools_svn #(?P<setuptools_svn>\\d+)',\n url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',\n ),\n dict(\n pattern=r'pypa/distutils#(?P<distutils>\\d+)',\n url='{GH}/pypa/distutils/issues/{distutils}',\n ),\n dict(\n pattern=r'^(?m)((?P<scm_version>v?\\d+(\\.\\d+){1,2}))\\n[-=]+\\n',\n with_scm='{text}\\n{rev[timestamp]:%d %b %Y}\\n',\n ),\n ],\n ),\n}\n", "path": "docs/conf.py"}], "after_files": [{"content": "import subprocess\nimport sys\nimport os\n\n\n# hack to run the bootstrap script so that jaraco.packaging.sphinx\n# can invoke setup.py\n'READTHEDOCS' in os.environ and subprocess.check_call(\n [sys.executable, '-m', 'bootstrap'],\n cwd=os.path.join(os.path.dirname(__file__), os.path.pardir),\n)\n\n# -- Project information -----------------------------------------------------\n\ngithub_url = 'https://github.com'\ngithub_sponsors_url = f'{github_url}/sponsors'\n\n# -- General configuration --\n\nextensions = [\n 'sphinx.ext.extlinks', # allows to create custom roles easily\n 'jaraco.packaging.sphinx',\n 'rst.linker',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for extlinks extension ---------------------------------------\nextlinks = {\n 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323\n}\n\n# -- Options for HTML output --\n\n# The theme to use for HTML and HTML Help pages. 
Major themes that come with\n# Sphinx are currently 'default' and 'sphinxdoc'.\nhtml_theme = 'nature'\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = ['_theme']\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\nhtml_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\nhtml_sidebars = {\n 'index': [\n 'relations.html', 'sourcelink.html', 'indexsidebar.html',\n 'searchbox.html']}\n\n# If false, no module index is generated.\nhtml_use_modindex = False\n\n# If false, no index is generated.\nhtml_use_index = False\n\n# -- Options for LaTeX output --\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author,\n# documentclass [howto/manual]).\nlatex_documents = [(\n 'index', 'Setuptools.tex', 'Setuptools Documentation',\n 'The fellowship of the packaging', 'manual',\n)]\n\nlink_files = {\n '../CHANGES.rst': dict(\n using=dict(\n BB='https://bitbucket.org',\n GH='https://github.com',\n ),\n replace=[\n dict(\n pattern=r'(Issue )?#(?P<issue>\\d+)',\n url='{package_url}/issues/{issue}',\n ),\n dict(\n pattern=r'BB Pull Request ?#(?P<bb_pull_request>\\d+)',\n url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',\n ),\n dict(\n pattern=r'Distribute #(?P<distribute>\\d+)',\n url='{BB}/tarek/distribute/issue/{distribute}',\n ),\n dict(\n pattern=r'Buildout #(?P<buildout>\\d+)',\n url='{GH}/buildout/buildout/issues/{buildout}',\n ),\n dict(\n pattern=r'Old Setuptools #(?P<old_setuptools>\\d+)',\n url='http://bugs.python.org/setuptools/issue{old_setuptools}',\n ),\n dict(\n pattern=r'Jython #(?P<jython>\\d+)',\n url='http://bugs.jython.org/issue{jython}',\n ),\n dict(\n pattern=r'(Python #|bpo-)(?P<python>\\d+)',\n url='http://bugs.python.org/issue{python}',\n ),\n dict(\n pattern=r'Interop #(?P<interop>\\d+)',\n url='{GH}/pypa/interoperability-peps/issues/{interop}',\n ),\n dict(\n pattern=r'Pip #(?P<pip>\\d+)',\n url='{GH}/pypa/pip/issues/{pip}',\n ),\n dict(\n pattern=r'Packaging #(?P<packaging>\\d+)',\n url='{GH}/pypa/packaging/issues/{packaging}',\n ),\n dict(\n pattern=r'[Pp]ackaging (?P<packaging_ver>\\d+(\\.\\d+)+)',\n url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',\n ),\n dict(\n pattern=r'PEP[- ](?P<pep_number>\\d+)',\n url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',\n ),\n dict(\n pattern=r'setuptools_svn #(?P<setuptools_svn>\\d+)',\n url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',\n ),\n dict(\n pattern=r'pypa/distutils#(?P<distutils>\\d+)',\n url='{GH}/pypa/distutils/issues/{distutils}',\n ),\n dict(\n pattern=r'^(?m)((?P<scm_version>v?\\d+(\\.\\d+){1,2}))\\n[-=]+\\n',\n with_scm='{text}\\n{rev[timestamp]:%d %b %Y}\\n',\n ),\n ],\n ),\n}\n\n\n# Be strict about any broken references:\nnitpicky = True\n\n\n# Ref: https://github.com/python-attrs/attrs/pull/571/files\\\n# #diff-85987f48f1258d9ee486e3191495582dR82\ndefault_role = 'any'\n", "path": "docs/conf.py"}]}
1918
130
gh_patches_debug_34686
rasdani/github-patches
git_diff
wagtail__wagtail-1225
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Memory leak in RoutablePage When resolving/reversing URLs, `RoutablePage` calls Djangos `django.core.urlresolvers.get_resolver` function to build a resolver object. This function is wrapped in an unlimited lru cache. As each time we call it is usually with a different page instance, this lru cache would grow forever. I've not seen any issues caused by this in the wild, but worth fixing. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wagtail/contrib/wagtailroutablepage/models.py` Content: ``` 1 from __future__ import unicode_literals 2 3 from six import string_types 4 5 from django.http import Http404 6 from django.core.urlresolvers import get_resolver 7 from django.core.exceptions import ImproperlyConfigured 8 9 from wagtail.wagtailcore.models import Page 10 from wagtail.wagtailcore.url_routing import RouteResult 11 12 13 class RoutablePageMixin(object): 14 """ 15 This class can be mixed in to a Page subclass to allow urlconfs to be 16 embedded inside pages. 17 """ 18 #: Set this to a tuple of ``django.conf.urls.url`` objects. 19 subpage_urls = None 20 21 def reverse_subpage(self, name, args=None, kwargs=None): 22 """ 23 This method does the same job as Djangos' built in "urlresolvers.reverse()" function for subpage urlconfs. 24 """ 25 args = args or [] 26 kwargs = kwargs or {} 27 28 if self.subpage_urls is None: 29 raise ImproperlyConfigured("You must set 'subpage_urls' on " + type(self).__name__) 30 31 resolver = get_resolver(self.subpage_urls) 32 return resolver.reverse(name, *args, **kwargs) 33 34 def resolve_subpage(self, path): 35 """ 36 This finds a view method/function from a URL path. 37 """ 38 if self.subpage_urls is None: 39 raise ImproperlyConfigured("You must set 'subpage_urls' on " + type(self).__name__) 40 41 resolver = get_resolver(self.subpage_urls) 42 view, args, kwargs = resolver.resolve(path) 43 44 # If view is a string, find it as an attribute of self 45 if isinstance(view, string_types): 46 view = getattr(self, view) 47 48 return view, args, kwargs 49 50 def route(self, request, path_components): 51 """ 52 This hooks the subpage urls into Wagtails routing. 53 """ 54 if self.live: 55 try: 56 path = '/' 57 if path_components: 58 path += '/'.join(path_components) + '/' 59 60 view, args, kwargs = self.resolve_subpage(path) 61 return RouteResult(self, args=(view, args, kwargs)) 62 except Http404: 63 pass 64 65 return super(RoutablePageMixin, self).route(request, path_components) 66 67 def serve(self, request, view, args, kwargs): 68 return view(request, *args, **kwargs) 69 70 def serve_preview(self, request, mode_name): 71 view, args, kwargs = self.resolve_subpage('/') 72 return view(request, *args, **kwargs) 73 74 75 class RoutablePage(RoutablePageMixin, Page): 76 """ 77 This class extends Page by adding methods to allow urlconfs 78 to be embedded inside pages 79 """ 80 81 is_abstract = True 82 83 class Meta: 84 abstract = True 85 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wagtail/contrib/wagtailroutablepage/models.py b/wagtail/contrib/wagtailroutablepage/models.py --- a/wagtail/contrib/wagtailroutablepage/models.py +++ b/wagtail/contrib/wagtailroutablepage/models.py @@ -3,8 +3,7 @@ from six import string_types from django.http import Http404 -from django.core.urlresolvers import get_resolver -from django.core.exceptions import ImproperlyConfigured +from django.core.urlresolvers import RegexURLResolver from wagtail.wagtailcore.models import Page from wagtail.wagtailcore.url_routing import RouteResult @@ -18,28 +17,36 @@ #: Set this to a tuple of ``django.conf.urls.url`` objects. subpage_urls = None + @classmethod + def get_subpage_urls(cls): + if cls.subpage_urls: + return cls.subpage_urls + + return () + + @classmethod + def get_resolver(cls): + if '_routablepage_urlresolver' not in cls.__dict__: + subpage_urls = cls.get_subpage_urls() + cls._routablepage_urlresolver = RegexURLResolver(r'^/', subpage_urls) + + return cls._routablepage_urlresolver + def reverse_subpage(self, name, args=None, kwargs=None): """ - This method does the same job as Djangos' built in "urlresolvers.reverse()" function for subpage urlconfs. + This method does the same job as Djangos' built in + "urlresolvers.reverse()" function for subpage urlconfs. """ args = args or [] kwargs = kwargs or {} - if self.subpage_urls is None: - raise ImproperlyConfigured("You must set 'subpage_urls' on " + type(self).__name__) - - resolver = get_resolver(self.subpage_urls) - return resolver.reverse(name, *args, **kwargs) + return self.get_resolver().reverse(name, *args, **kwargs) def resolve_subpage(self, path): """ This finds a view method/function from a URL path. """ - if self.subpage_urls is None: - raise ImproperlyConfigured("You must set 'subpage_urls' on " + type(self).__name__) - - resolver = get_resolver(self.subpage_urls) - view, args, kwargs = resolver.resolve(path) + view, args, kwargs = self.get_resolver().resolve(path) # If view is a string, find it as an attribute of self if isinstance(view, string_types):
{"golden_diff": "diff --git a/wagtail/contrib/wagtailroutablepage/models.py b/wagtail/contrib/wagtailroutablepage/models.py\n--- a/wagtail/contrib/wagtailroutablepage/models.py\n+++ b/wagtail/contrib/wagtailroutablepage/models.py\n@@ -3,8 +3,7 @@\n from six import string_types\n \n from django.http import Http404\n-from django.core.urlresolvers import get_resolver\n-from django.core.exceptions import ImproperlyConfigured\n+from django.core.urlresolvers import RegexURLResolver\n \n from wagtail.wagtailcore.models import Page\n from wagtail.wagtailcore.url_routing import RouteResult\n@@ -18,28 +17,36 @@\n #: Set this to a tuple of ``django.conf.urls.url`` objects.\n subpage_urls = None\n \n+ @classmethod\n+ def get_subpage_urls(cls):\n+ if cls.subpage_urls:\n+ return cls.subpage_urls\n+\n+ return ()\n+\n+ @classmethod\n+ def get_resolver(cls):\n+ if '_routablepage_urlresolver' not in cls.__dict__:\n+ subpage_urls = cls.get_subpage_urls()\n+ cls._routablepage_urlresolver = RegexURLResolver(r'^/', subpage_urls)\n+\n+ return cls._routablepage_urlresolver\n+\n def reverse_subpage(self, name, args=None, kwargs=None):\n \"\"\"\n- This method does the same job as Djangos' built in \"urlresolvers.reverse()\" function for subpage urlconfs.\n+ This method does the same job as Djangos' built in\n+ \"urlresolvers.reverse()\" function for subpage urlconfs.\n \"\"\"\n args = args or []\n kwargs = kwargs or {}\n \n- if self.subpage_urls is None:\n- raise ImproperlyConfigured(\"You must set 'subpage_urls' on \" + type(self).__name__)\n-\n- resolver = get_resolver(self.subpage_urls)\n- return resolver.reverse(name, *args, **kwargs)\n+ return self.get_resolver().reverse(name, *args, **kwargs)\n \n def resolve_subpage(self, path):\n \"\"\"\n This finds a view method/function from a URL path.\n \"\"\"\n- if self.subpage_urls is None:\n- raise ImproperlyConfigured(\"You must set 'subpage_urls' on \" + type(self).__name__)\n-\n- resolver = get_resolver(self.subpage_urls)\n- view, args, kwargs = resolver.resolve(path)\n+ view, args, kwargs = self.get_resolver().resolve(path)\n \n # If view is a string, find it as an attribute of self\n if isinstance(view, string_types):\n", "issue": "Memory leak in RoutablePage\nWhen resolving/reversing URLs, `RoutablePage` calls Djangos `django.core.urlresolvers.get_resolver` function to build a resolver object.\n\nThis function is wrapped in an unlimited lru cache. 
As each time we call it is usually with a different page instance, this lru cache would grow forever.\n\nI've not seen any issues caused by this in the wild, but worth fixing.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom six import string_types\n\nfrom django.http import Http404\nfrom django.core.urlresolvers import get_resolver\nfrom django.core.exceptions import ImproperlyConfigured\n\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailcore.url_routing import RouteResult\n\n\nclass RoutablePageMixin(object):\n \"\"\"\n This class can be mixed in to a Page subclass to allow urlconfs to be\n embedded inside pages.\n \"\"\"\n #: Set this to a tuple of ``django.conf.urls.url`` objects.\n subpage_urls = None\n\n def reverse_subpage(self, name, args=None, kwargs=None):\n \"\"\"\n This method does the same job as Djangos' built in \"urlresolvers.reverse()\" function for subpage urlconfs.\n \"\"\"\n args = args or []\n kwargs = kwargs or {}\n\n if self.subpage_urls is None:\n raise ImproperlyConfigured(\"You must set 'subpage_urls' on \" + type(self).__name__)\n\n resolver = get_resolver(self.subpage_urls)\n return resolver.reverse(name, *args, **kwargs)\n\n def resolve_subpage(self, path):\n \"\"\"\n This finds a view method/function from a URL path.\n \"\"\"\n if self.subpage_urls is None:\n raise ImproperlyConfigured(\"You must set 'subpage_urls' on \" + type(self).__name__)\n\n resolver = get_resolver(self.subpage_urls)\n view, args, kwargs = resolver.resolve(path)\n\n # If view is a string, find it as an attribute of self\n if isinstance(view, string_types):\n view = getattr(self, view)\n\n return view, args, kwargs\n\n def route(self, request, path_components):\n \"\"\"\n This hooks the subpage urls into Wagtails routing.\n \"\"\"\n if self.live:\n try:\n path = '/'\n if path_components:\n path += '/'.join(path_components) + '/'\n\n view, args, kwargs = self.resolve_subpage(path)\n return RouteResult(self, args=(view, args, kwargs))\n except Http404:\n pass\n\n return super(RoutablePageMixin, self).route(request, path_components)\n\n def serve(self, request, view, args, kwargs):\n return view(request, *args, **kwargs)\n\n def serve_preview(self, request, mode_name):\n view, args, kwargs = self.resolve_subpage('/')\n return view(request, *args, **kwargs)\n\n\nclass RoutablePage(RoutablePageMixin, Page):\n \"\"\"\n This class extends Page by adding methods to allow urlconfs\n to be embedded inside pages\n \"\"\"\n\n is_abstract = True\n\n class Meta:\n abstract = True\n", "path": "wagtail/contrib/wagtailroutablepage/models.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom six import string_types\n\nfrom django.http import Http404\nfrom django.core.urlresolvers import RegexURLResolver\n\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailcore.url_routing import RouteResult\n\n\nclass RoutablePageMixin(object):\n \"\"\"\n This class can be mixed in to a Page subclass to allow urlconfs to be\n embedded inside pages.\n \"\"\"\n #: Set this to a tuple of ``django.conf.urls.url`` objects.\n subpage_urls = None\n\n @classmethod\n def get_subpage_urls(cls):\n if cls.subpage_urls:\n return cls.subpage_urls\n\n return ()\n\n @classmethod\n def get_resolver(cls):\n if '_routablepage_urlresolver' not in cls.__dict__:\n subpage_urls = cls.get_subpage_urls()\n cls._routablepage_urlresolver = RegexURLResolver(r'^/', subpage_urls)\n\n return cls._routablepage_urlresolver\n\n def reverse_subpage(self, name, 
args=None, kwargs=None):\n \"\"\"\n This method does the same job as Djangos' built in\n \"urlresolvers.reverse()\" function for subpage urlconfs.\n \"\"\"\n args = args or []\n kwargs = kwargs or {}\n\n return self.get_resolver().reverse(name, *args, **kwargs)\n\n def resolve_subpage(self, path):\n \"\"\"\n This finds a view method/function from a URL path.\n \"\"\"\n view, args, kwargs = self.get_resolver().resolve(path)\n\n # If view is a string, find it as an attribute of self\n if isinstance(view, string_types):\n view = getattr(self, view)\n\n return view, args, kwargs\n\n def route(self, request, path_components):\n \"\"\"\n This hooks the subpage urls into Wagtails routing.\n \"\"\"\n if self.live:\n try:\n path = '/'\n if path_components:\n path += '/'.join(path_components) + '/'\n\n view, args, kwargs = self.resolve_subpage(path)\n return RouteResult(self, args=(view, args, kwargs))\n except Http404:\n pass\n\n return super(RoutablePageMixin, self).route(request, path_components)\n\n def serve(self, request, view, args, kwargs):\n return view(request, *args, **kwargs)\n\n def serve_preview(self, request, mode_name):\n view, args, kwargs = self.resolve_subpage('/')\n return view(request, *args, **kwargs)\n\n\nclass RoutablePage(RoutablePageMixin, Page):\n \"\"\"\n This class extends Page by adding methods to allow urlconfs\n to be embedded inside pages\n \"\"\"\n\n is_abstract = True\n\n class Meta:\n abstract = True\n", "path": "wagtail/contrib/wagtailroutablepage/models.py"}]}
num_tokens: 1,117
num_tokens_diff: 594

problem_id: gh_patches_debug_39320
source: rasdani/github-patches
task_type: git_diff
in_source_id: alltheplaces__alltheplaces-3323
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spider woods_coffee is broken During the global build at 2021-10-20-14-42-48, spider **woods_coffee** failed with **0 features** and **1 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/woods_coffee.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/woods_coffee.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/woods_coffee.geojson)) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/woods_coffee.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import scrapy 3 from locations.items import GeojsonPointItem 4 import re 5 6 daysKey = { 7 'MONDAY': 'Mo', 'TUESDAY': 'Tu', 'WEDNESDAY': 'We', 'THURSDAY': 'Th', 8 'FRIDAY': 'Fr', 'SATURDAY': 'Sa', 'SUNDAY': 'Su' 9 } 10 11 class WoodsCoffeeSpider(scrapy.Spider): 12 name = "woods_coffee" 13 item_attributes = { 'brand': "Woods Coffee" } 14 allowed_domains = ["www.woodscoffee.com"] 15 start_urls = ( 16 'https://woodscoffee.com/locations/', 17 ) 18 19 def store_hours(self, hours): 20 hours = hours.replace('–','-') 21 hours = hours.replace(u'\xa0', u' ') 22 days = hours.split(': ')[0].strip() 23 24 if('-' in days): 25 startDay = daysKey[days.split('-')[0]] 26 endDay = daysKey[days.split('-')[1]] 27 dayOutput = startDay + "-" + endDay 28 else: 29 if('DAILY' in days): 30 startDay='Mo' 31 endDay='Su' 32 dayOutput = startDay + "-" + endDay 33 else: 34 dayOutput = daysKey[days] 35 36 bothHours = hours.split(': ')[1].replace(' ','') 37 openHours = bothHours.split("-")[0] 38 closeHours = bothHours.split("-")[1] 39 40 if("AM" in openHours): 41 openHours = openHours.replace("AM","") 42 if(":" in openHours): 43 openH = openHours.split(":")[0] 44 openM = openHours.split(":")[1] 45 else: 46 openH = openHours 47 openM = "00" 48 openHours = openH + ":" + openM 49 50 if("PM" in openHours): 51 openHours = openHours.replace("PM","") 52 if(":" in openHours): 53 openH = openHours.split(":")[0] 54 openM = openHours.split(":")[1] 55 else: 56 openH = openHours 57 openM = "00" 58 openH = str(int(openH) + 12) 59 openHours = openH + ":" + openM 60 61 if("AM" in closeHours): 62 closeHours = closeHours.replace("AM","") 63 if(":" in closeHours): 64 closeH = closeHours.split(":")[0] 65 closeM = closeHours.split(":")[1] 66 else: 67 closeH = closeHours 68 closeM = "00" 69 closeHours = closeH + ":" + closeM 70 71 if("PM" in closeHours): 72 closeHours = closeHours.replace("PM","") 73 if(":" in closeHours): 74 closeH = closeHours.split(":")[0] 75 closeM = closeHours.split(":")[1] 76 else: 77 closeH = closeHours 78 closeM = "00" 79 closeH = str(int(closeH) + 12) 80 closeHours = closeH + ":" + closeM 81 82 return dayOutput +' '+ openHours.replace(' ','') + "-" + closeHours + ';' 83 84 def parse(self, response): 85 for match in response.xpath("//h2[contains(@class,'font-weight-700 text-uppercase')]/parent::div/parent::div"): 86 cityState = match.xpath(".//div[contains(@class,'heading-text el-text')]/div/p/text()").extract_first(); 87 cityString = cityState.split(",")[0].strip() 88 stateString = cityState.split(",")[1].strip() 89 90 addressString = match.xpath(".//div[contains(@class,'uncode_text_column')]/p[contains(@style,'text-align: center;')][not(.//strong)]/text()").extract_first().strip() 91 postcodeString = 
addressString.split(stateString)[1].strip() 92 addressString = addressString.split(stateString)[0].replace(',','').strip().strip(cityString).strip() 93 94 95 if(match.xpath(".//div[contains(@class,'uncode_text_column')]/p[contains(@style,'text-align: center;')][not (.//strong)]/br/following-sibling::text()").extract_first() is None): 96 phoneString = "" 97 else: 98 phoneString = match.xpath(".//div[contains(@class,'uncode_text_column')]/p[contains(@style,'text-align: center;')][not (.//strong)]/br/following-sibling::text()").extract_first() 99 phoneString = phoneString.replace(' ','').strip() 100 101 hoursString = "" 102 for hoursMatch in match.xpath(".//p[contains(@style,'text-align: center;')]/strong//following-sibling::text()"): 103 hoursString = hoursString +' '+self.store_hours(hoursMatch.extract().replace('\n','')) 104 hoursString = hoursString.strip(';').strip() 105 106 yield GeojsonPointItem( 107 ref=match.xpath(".//h2[contains(@class,'font-weight-700 text-uppercase')]/span/text()").extract_first(), 108 addr_full=addressString, 109 city=cityString, 110 state=stateString, 111 postcode=postcodeString, 112 phone=phoneString, 113 opening_hours=hoursString, 114 website=match.xpath(".//a/@href").extract_first(), 115 ) 116 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/woods_coffee.py b/locations/spiders/woods_coffee.py --- a/locations/spiders/woods_coffee.py +++ b/locations/spiders/woods_coffee.py @@ -1,16 +1,17 @@ # -*- coding: utf-8 -*- import scrapy + from locations.items import GeojsonPointItem -import re daysKey = { 'MONDAY': 'Mo', 'TUESDAY': 'Tu', 'WEDNESDAY': 'We', 'THURSDAY': 'Th', 'FRIDAY': 'Fr', 'SATURDAY': 'Sa', 'SUNDAY': 'Su' } + class WoodsCoffeeSpider(scrapy.Spider): name = "woods_coffee" - item_attributes = { 'brand': "Woods Coffee" } + item_attributes = {'brand': "Woods Coffee", "brand_wikidata": "Q8033255"} allowed_domains = ["www.woodscoffee.com"] start_urls = ( 'https://woodscoffee.com/locations/', @@ -82,8 +83,8 @@ return dayOutput +' '+ openHours.replace(' ','') + "-" + closeHours + ';' def parse(self, response): - for match in response.xpath("//h2[contains(@class,'font-weight-700 text-uppercase')]/parent::div/parent::div"): - cityState = match.xpath(".//div[contains(@class,'heading-text el-text')]/div/p/text()").extract_first(); + for match in response.xpath("//h2[contains(@class,'font-weight-700 text-uppercase')]/parent::div/parent::div/parent::div"): + cityState = match.xpath(".//div[contains(@class,'heading-text el-text')]/div/p/text()").extract_first() cityString = cityState.split(",")[0].strip() stateString = cityState.split(",")[1].strip() @@ -103,13 +104,17 @@ hoursString = hoursString +' '+self.store_hours(hoursMatch.extract().replace('\n','')) hoursString = hoursString.strip(';').strip() + name = match.xpath(".//h2[contains(@class,'font-weight-700 text-uppercase')]/span/text()").extract_first() + yield GeojsonPointItem( - ref=match.xpath(".//h2[contains(@class,'font-weight-700 text-uppercase')]/span/text()").extract_first(), + ref=name, + name=name, addr_full=addressString, city=cityString, state=stateString, postcode=postcodeString, + country="USA", phone=phoneString, opening_hours=hoursString, - website=match.xpath(".//a/@href").extract_first(), + website=response.urljoin(match.xpath(".//a/@href").extract_first()), )
{"golden_diff": "diff --git a/locations/spiders/woods_coffee.py b/locations/spiders/woods_coffee.py\n--- a/locations/spiders/woods_coffee.py\n+++ b/locations/spiders/woods_coffee.py\n@@ -1,16 +1,17 @@\n # -*- coding: utf-8 -*-\n import scrapy\n+\n from locations.items import GeojsonPointItem\n-import re\n \n daysKey = {\n 'MONDAY': 'Mo', 'TUESDAY': 'Tu', 'WEDNESDAY': 'We', 'THURSDAY': 'Th',\n 'FRIDAY': 'Fr', 'SATURDAY': 'Sa', 'SUNDAY': 'Su'\n }\n \n+\n class WoodsCoffeeSpider(scrapy.Spider):\n name = \"woods_coffee\"\n- item_attributes = { 'brand': \"Woods Coffee\" }\n+ item_attributes = {'brand': \"Woods Coffee\", \"brand_wikidata\": \"Q8033255\"}\n allowed_domains = [\"www.woodscoffee.com\"]\n start_urls = (\n 'https://woodscoffee.com/locations/',\n@@ -82,8 +83,8 @@\n return dayOutput +' '+ openHours.replace(' ','') + \"-\" + closeHours + ';'\n \n def parse(self, response):\n- for match in response.xpath(\"//h2[contains(@class,'font-weight-700 text-uppercase')]/parent::div/parent::div\"):\n- cityState = match.xpath(\".//div[contains(@class,'heading-text el-text')]/div/p/text()\").extract_first();\n+ for match in response.xpath(\"//h2[contains(@class,'font-weight-700 text-uppercase')]/parent::div/parent::div/parent::div\"):\n+ cityState = match.xpath(\".//div[contains(@class,'heading-text el-text')]/div/p/text()\").extract_first()\n cityString = cityState.split(\",\")[0].strip()\n stateString = cityState.split(\",\")[1].strip()\n \n@@ -103,13 +104,17 @@\n hoursString = hoursString +' '+self.store_hours(hoursMatch.extract().replace('\\n',''))\n hoursString = hoursString.strip(';').strip()\n \n+ name = match.xpath(\".//h2[contains(@class,'font-weight-700 text-uppercase')]/span/text()\").extract_first()\n+\n yield GeojsonPointItem(\n- ref=match.xpath(\".//h2[contains(@class,'font-weight-700 text-uppercase')]/span/text()\").extract_first(),\n+ ref=name,\n+ name=name,\n addr_full=addressString,\n city=cityString,\n state=stateString,\n postcode=postcodeString,\n+ country=\"USA\",\n phone=phoneString,\n opening_hours=hoursString,\n- website=match.xpath(\".//a/@href\").extract_first(),\n+ website=response.urljoin(match.xpath(\".//a/@href\").extract_first()),\n )\n", "issue": "Spider woods_coffee is broken\nDuring the global build at 2021-10-20-14-42-48, spider **woods_coffee** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/woods_coffee.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/woods_coffee.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/woods_coffee.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nimport re\n\ndaysKey = {\n 'MONDAY': 'Mo', 'TUESDAY': 'Tu', 'WEDNESDAY': 'We', 'THURSDAY': 'Th',\n 'FRIDAY': 'Fr', 'SATURDAY': 'Sa', 'SUNDAY': 'Su'\n}\n\nclass WoodsCoffeeSpider(scrapy.Spider):\n name = \"woods_coffee\"\n item_attributes = { 'brand': \"Woods Coffee\" }\n allowed_domains = [\"www.woodscoffee.com\"]\n start_urls = (\n 'https://woodscoffee.com/locations/',\n )\n\n def store_hours(self, hours):\n hours = hours.replace('\u2013','-')\n hours = hours.replace(u'\\xa0', u' ')\n days = hours.split(': ')[0].strip()\n\n if('-' in days):\n startDay = daysKey[days.split('-')[0]]\n endDay = daysKey[days.split('-')[1]]\n dayOutput = startDay + \"-\" + endDay\n else:\n if('DAILY' in days):\n startDay='Mo'\n endDay='Su'\n 
dayOutput = startDay + \"-\" + endDay\n else:\n dayOutput = daysKey[days]\n\n bothHours = hours.split(': ')[1].replace(' ','')\n openHours = bothHours.split(\"-\")[0]\n closeHours = bothHours.split(\"-\")[1]\n\n if(\"AM\" in openHours):\n openHours = openHours.replace(\"AM\",\"\")\n if(\":\" in openHours):\n openH = openHours.split(\":\")[0]\n openM = openHours.split(\":\")[1]\n else:\n openH = openHours\n openM = \"00\"\n openHours = openH + \":\" + openM\n\n if(\"PM\" in openHours):\n openHours = openHours.replace(\"PM\",\"\")\n if(\":\" in openHours):\n openH = openHours.split(\":\")[0]\n openM = openHours.split(\":\")[1]\n else:\n openH = openHours\n openM = \"00\"\n openH = str(int(openH) + 12)\n openHours = openH + \":\" + openM\n\n if(\"AM\" in closeHours):\n closeHours = closeHours.replace(\"AM\",\"\")\n if(\":\" in closeHours):\n closeH = closeHours.split(\":\")[0]\n closeM = closeHours.split(\":\")[1]\n else:\n closeH = closeHours\n closeM = \"00\"\n closeHours = closeH + \":\" + closeM\n\n if(\"PM\" in closeHours):\n closeHours = closeHours.replace(\"PM\",\"\")\n if(\":\" in closeHours):\n closeH = closeHours.split(\":\")[0]\n closeM = closeHours.split(\":\")[1]\n else:\n closeH = closeHours\n closeM = \"00\"\n closeH = str(int(closeH) + 12)\n closeHours = closeH + \":\" + closeM\n\n return dayOutput +' '+ openHours.replace(' ','') + \"-\" + closeHours + ';'\n\n def parse(self, response):\n for match in response.xpath(\"//h2[contains(@class,'font-weight-700 text-uppercase')]/parent::div/parent::div\"):\n cityState = match.xpath(\".//div[contains(@class,'heading-text el-text')]/div/p/text()\").extract_first();\n cityString = cityState.split(\",\")[0].strip()\n stateString = cityState.split(\",\")[1].strip()\n\n addressString = match.xpath(\".//div[contains(@class,'uncode_text_column')]/p[contains(@style,'text-align: center;')][not(.//strong)]/text()\").extract_first().strip()\n postcodeString = addressString.split(stateString)[1].strip()\n addressString = addressString.split(stateString)[0].replace(',','').strip().strip(cityString).strip()\n\n\n if(match.xpath(\".//div[contains(@class,'uncode_text_column')]/p[contains(@style,'text-align: center;')][not (.//strong)]/br/following-sibling::text()\").extract_first() is None):\n phoneString = \"\"\n else:\n phoneString = match.xpath(\".//div[contains(@class,'uncode_text_column')]/p[contains(@style,'text-align: center;')][not (.//strong)]/br/following-sibling::text()\").extract_first()\n phoneString = phoneString.replace(' ','').strip()\n\n hoursString = \"\"\n for hoursMatch in match.xpath(\".//p[contains(@style,'text-align: center;')]/strong//following-sibling::text()\"):\n hoursString = hoursString +' '+self.store_hours(hoursMatch.extract().replace('\\n',''))\n hoursString = hoursString.strip(';').strip()\n\n yield GeojsonPointItem(\n ref=match.xpath(\".//h2[contains(@class,'font-weight-700 text-uppercase')]/span/text()\").extract_first(),\n addr_full=addressString,\n city=cityString,\n state=stateString,\n postcode=postcodeString,\n phone=phoneString,\n opening_hours=hoursString,\n website=match.xpath(\".//a/@href\").extract_first(),\n )\n", "path": "locations/spiders/woods_coffee.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\ndaysKey = {\n 'MONDAY': 'Mo', 'TUESDAY': 'Tu', 'WEDNESDAY': 'We', 'THURSDAY': 'Th',\n 'FRIDAY': 'Fr', 'SATURDAY': 'Sa', 'SUNDAY': 'Su'\n}\n\n\nclass WoodsCoffeeSpider(scrapy.Spider):\n name = \"woods_coffee\"\n item_attributes = 
{'brand': \"Woods Coffee\", \"brand_wikidata\": \"Q8033255\"}\n allowed_domains = [\"www.woodscoffee.com\"]\n start_urls = (\n 'https://woodscoffee.com/locations/',\n )\n\n def store_hours(self, hours):\n hours = hours.replace('\u2013','-')\n hours = hours.replace(u'\\xa0', u' ')\n days = hours.split(': ')[0].strip()\n\n if('-' in days):\n startDay = daysKey[days.split('-')[0]]\n endDay = daysKey[days.split('-')[1]]\n dayOutput = startDay + \"-\" + endDay\n else:\n if('DAILY' in days):\n startDay='Mo'\n endDay='Su'\n dayOutput = startDay + \"-\" + endDay\n else:\n dayOutput = daysKey[days]\n\n bothHours = hours.split(': ')[1].replace(' ','')\n openHours = bothHours.split(\"-\")[0]\n closeHours = bothHours.split(\"-\")[1]\n\n if(\"AM\" in openHours):\n openHours = openHours.replace(\"AM\",\"\")\n if(\":\" in openHours):\n openH = openHours.split(\":\")[0]\n openM = openHours.split(\":\")[1]\n else:\n openH = openHours\n openM = \"00\"\n openHours = openH + \":\" + openM\n\n if(\"PM\" in openHours):\n openHours = openHours.replace(\"PM\",\"\")\n if(\":\" in openHours):\n openH = openHours.split(\":\")[0]\n openM = openHours.split(\":\")[1]\n else:\n openH = openHours\n openM = \"00\"\n openH = str(int(openH) + 12)\n openHours = openH + \":\" + openM\n\n if(\"AM\" in closeHours):\n closeHours = closeHours.replace(\"AM\",\"\")\n if(\":\" in closeHours):\n closeH = closeHours.split(\":\")[0]\n closeM = closeHours.split(\":\")[1]\n else:\n closeH = closeHours\n closeM = \"00\"\n closeHours = closeH + \":\" + closeM\n\n if(\"PM\" in closeHours):\n closeHours = closeHours.replace(\"PM\",\"\")\n if(\":\" in closeHours):\n closeH = closeHours.split(\":\")[0]\n closeM = closeHours.split(\":\")[1]\n else:\n closeH = closeHours\n closeM = \"00\"\n closeH = str(int(closeH) + 12)\n closeHours = closeH + \":\" + closeM\n\n return dayOutput +' '+ openHours.replace(' ','') + \"-\" + closeHours + ';'\n\n def parse(self, response):\n for match in response.xpath(\"//h2[contains(@class,'font-weight-700 text-uppercase')]/parent::div/parent::div/parent::div\"):\n cityState = match.xpath(\".//div[contains(@class,'heading-text el-text')]/div/p/text()\").extract_first()\n cityString = cityState.split(\",\")[0].strip()\n stateString = cityState.split(\",\")[1].strip()\n\n addressString = match.xpath(\".//div[contains(@class,'uncode_text_column')]/p[contains(@style,'text-align: center;')][not(.//strong)]/text()\").extract_first().strip()\n postcodeString = addressString.split(stateString)[1].strip()\n addressString = addressString.split(stateString)[0].replace(',','').strip().strip(cityString).strip()\n\n\n if(match.xpath(\".//div[contains(@class,'uncode_text_column')]/p[contains(@style,'text-align: center;')][not (.//strong)]/br/following-sibling::text()\").extract_first() is None):\n phoneString = \"\"\n else:\n phoneString = match.xpath(\".//div[contains(@class,'uncode_text_column')]/p[contains(@style,'text-align: center;')][not (.//strong)]/br/following-sibling::text()\").extract_first()\n phoneString = phoneString.replace(' ','').strip()\n\n hoursString = \"\"\n for hoursMatch in match.xpath(\".//p[contains(@style,'text-align: center;')]/strong//following-sibling::text()\"):\n hoursString = hoursString +' '+self.store_hours(hoursMatch.extract().replace('\\n',''))\n hoursString = hoursString.strip(';').strip()\n\n name = match.xpath(\".//h2[contains(@class,'font-weight-700 text-uppercase')]/span/text()\").extract_first()\n\n yield GeojsonPointItem(\n ref=name,\n name=name,\n addr_full=addressString,\n 
city=cityString,\n state=stateString,\n postcode=postcodeString,\n country=\"USA\",\n phone=phoneString,\n opening_hours=hoursString,\n website=response.urljoin(match.xpath(\".//a/@href\").extract_first()),\n )\n", "path": "locations/spiders/woods_coffee.py"}]}
num_tokens: 1,842
num_tokens_diff: 634

problem_id: gh_patches_debug_20028
source: rasdani/github-patches
task_type: git_diff
in_source_id: pypa__setuptools-3207
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] Removal of convert_path break some packages ### setuptools version setuptools=61.0.0 ### Python version Python 3.9 ### OS all ### Additional environment information _No response_ ### Description Some packages such as Cartopy have been broken by upgrade to setuptools=61.0.0 https://github.com/SciTools/cartopy/issues/2021 This is because of commit https://github.com/pypa/setuptools/commit/1ee962510ba66578f6069e6a675b3715ad12ac0b which removes the import ```python from distutils.util import convert_path ``` This should at least be in the changelog as breakage of API ### Expected behavior Removing import from __init__.py should be documented in the [CHANGES.rst file](https://github.com/pypa/setuptools/blob/main/CHANGES.rst) for version 61.0.0 ### How to Reproduce Try installing Cartopy with lastest setuptools version ### Output ```console ImportError: cannot import name 'convert_path' from 'setuptools' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setuptools/__init__.py` Content: ``` 1 """Extensions to the 'distutils' for large or complex distributions""" 2 3 import functools 4 import os 5 import re 6 7 import _distutils_hack.override # noqa: F401 8 9 import distutils.core 10 from distutils.errors import DistutilsOptionError 11 12 from ._deprecation_warning import SetuptoolsDeprecationWarning 13 14 import setuptools.version 15 from setuptools.extension import Extension 16 from setuptools.dist import Distribution 17 from setuptools.depends import Require 18 from setuptools.discovery import PackageFinder, PEP420PackageFinder 19 from . import monkey 20 from . import logging 21 22 23 __all__ = [ 24 'setup', 25 'Distribution', 26 'Command', 27 'Extension', 28 'Require', 29 'SetuptoolsDeprecationWarning', 30 'find_packages', 31 'find_namespace_packages', 32 ] 33 34 __version__ = setuptools.version.__version__ 35 36 bootstrap_install_from = None 37 38 39 find_packages = PackageFinder.find 40 find_namespace_packages = PEP420PackageFinder.find 41 42 43 def _install_setup_requires(attrs): 44 # Note: do not use `setuptools.Distribution` directly, as 45 # our PEP 517 backend patch `distutils.core.Distribution`. 46 class MinimalDistribution(distutils.core.Distribution): 47 """ 48 A minimal version of a distribution for supporting the 49 fetch_build_eggs interface. 50 """ 51 52 def __init__(self, attrs): 53 _incl = 'dependency_links', 'setup_requires' 54 filtered = {k: attrs[k] for k in set(_incl) & set(attrs)} 55 super().__init__(filtered) 56 # Prevent accidentally triggering discovery with incomplete set of attrs 57 self.set_defaults._disable() 58 59 def finalize_options(self): 60 """ 61 Disable finalize_options to avoid building the working set. 62 Ref #2158. 63 """ 64 65 dist = MinimalDistribution(attrs) 66 67 # Honor setup.cfg's options. 68 dist.parse_config_files(ignore_option_errors=True) 69 if dist.setup_requires: 70 dist.fetch_build_eggs(dist.setup_requires) 71 72 73 def setup(**attrs): 74 # Make sure we have any requirements needed to interpret 'attrs'. 
75 logging.configure() 76 _install_setup_requires(attrs) 77 return distutils.core.setup(**attrs) 78 79 80 setup.__doc__ = distutils.core.setup.__doc__ 81 82 83 _Command = monkey.get_unpatched(distutils.core.Command) 84 85 86 class Command(_Command): 87 __doc__ = _Command.__doc__ 88 89 command_consumes_arguments = False 90 91 def __init__(self, dist, **kw): 92 """ 93 Construct the command for dist, updating 94 vars(self) with any keyword parameters. 95 """ 96 super().__init__(dist) 97 vars(self).update(kw) 98 99 def _ensure_stringlike(self, option, what, default=None): 100 val = getattr(self, option) 101 if val is None: 102 setattr(self, option, default) 103 return default 104 elif not isinstance(val, str): 105 raise DistutilsOptionError( 106 "'%s' must be a %s (got `%s`)" % (option, what, val) 107 ) 108 return val 109 110 def ensure_string_list(self, option): 111 r"""Ensure that 'option' is a list of strings. If 'option' is 112 currently a string, we split it either on /,\s*/ or /\s+/, so 113 "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become 114 ["foo", "bar", "baz"]. 115 """ 116 val = getattr(self, option) 117 if val is None: 118 return 119 elif isinstance(val, str): 120 setattr(self, option, re.split(r',\s*|\s+', val)) 121 else: 122 if isinstance(val, list): 123 ok = all(isinstance(v, str) for v in val) 124 else: 125 ok = False 126 if not ok: 127 raise DistutilsOptionError( 128 "'%s' must be a list of strings (got %r)" % (option, val) 129 ) 130 131 def reinitialize_command(self, command, reinit_subcommands=0, **kw): 132 cmd = _Command.reinitialize_command(self, command, reinit_subcommands) 133 vars(cmd).update(kw) 134 return cmd 135 136 137 def _find_all_simple(path): 138 """ 139 Find all files under 'path' 140 """ 141 results = ( 142 os.path.join(base, file) 143 for base, dirs, files in os.walk(path, followlinks=True) 144 for file in files 145 ) 146 return filter(os.path.isfile, results) 147 148 149 def findall(dir=os.curdir): 150 """ 151 Find all files under 'dir' and return the list of full filenames. 152 Unless dir is '.', return full filenames with dir prepended. 153 """ 154 files = _find_all_simple(dir) 155 if dir == os.curdir: 156 make_rel = functools.partial(os.path.relpath, start=dir) 157 files = map(make_rel, files) 158 return list(files) 159 160 161 class sic(str): 162 """Treat this string as-is (https://en.wikipedia.org/wiki/Sic)""" 163 164 165 # Apply monkey patches 166 monkey.patch_all() 167 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setuptools/__init__.py b/setuptools/__init__.py --- a/setuptools/__init__.py +++ b/setuptools/__init__.py @@ -3,11 +3,13 @@ import functools import os import re +import warnings import _distutils_hack.override # noqa: F401 import distutils.core from distutils.errors import DistutilsOptionError +from distutils.util import convert_path as _convert_path from ._deprecation_warning import SetuptoolsDeprecationWarning @@ -158,6 +160,19 @@ return list(files) +@functools.wraps(_convert_path) +def convert_path(pathname): + from inspect import cleandoc + + msg = """ + The function `convert_path` is considered internal and not part of the public API. + Its direct usage by 3rd-party packages is considered deprecated and the function + may be removed in the future. + """ + warnings.warn(cleandoc(msg), SetuptoolsDeprecationWarning) + return _convert_path(pathname) + + class sic(str): """Treat this string as-is (https://en.wikipedia.org/wiki/Sic)"""
{"golden_diff": "diff --git a/setuptools/__init__.py b/setuptools/__init__.py\n--- a/setuptools/__init__.py\n+++ b/setuptools/__init__.py\n@@ -3,11 +3,13 @@\n import functools\n import os\n import re\n+import warnings\n \n import _distutils_hack.override # noqa: F401\n \n import distutils.core\n from distutils.errors import DistutilsOptionError\n+from distutils.util import convert_path as _convert_path\n \n from ._deprecation_warning import SetuptoolsDeprecationWarning\n \n@@ -158,6 +160,19 @@\n return list(files)\n \n \n+@functools.wraps(_convert_path)\n+def convert_path(pathname):\n+ from inspect import cleandoc\n+\n+ msg = \"\"\"\n+ The function `convert_path` is considered internal and not part of the public API.\n+ Its direct usage by 3rd-party packages is considered deprecated and the function\n+ may be removed in the future.\n+ \"\"\"\n+ warnings.warn(cleandoc(msg), SetuptoolsDeprecationWarning)\n+ return _convert_path(pathname)\n+\n+\n class sic(str):\n \"\"\"Treat this string as-is (https://en.wikipedia.org/wiki/Sic)\"\"\"\n", "issue": "[BUG] Removal of convert_path break some packages\n### setuptools version\n\nsetuptools=61.0.0\n\n### Python version\n\nPython 3.9\n\n### OS\n\nall\n\n### Additional environment information\n\n_No response_\n\n### Description\n\nSome packages such as Cartopy have been broken by upgrade to setuptools=61.0.0\r\n\r\nhttps://github.com/SciTools/cartopy/issues/2021\r\n\r\nThis is because of commit https://github.com/pypa/setuptools/commit/1ee962510ba66578f6069e6a675b3715ad12ac0b which removes the import\r\n```python\r\nfrom distutils.util import convert_path\r\n```\r\n\r\nThis should at least be in the changelog as breakage of API\n\n### Expected behavior\n\nRemoving import from __init__.py should be documented in the [CHANGES.rst file](https://github.com/pypa/setuptools/blob/main/CHANGES.rst) for version 61.0.0\n\n### How to Reproduce\n\nTry installing Cartopy with lastest setuptools version\n\n### Output\n\n```console\r\nImportError: cannot import name 'convert_path' from 'setuptools'\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Extensions to the 'distutils' for large or complex distributions\"\"\"\n\nimport functools\nimport os\nimport re\n\nimport _distutils_hack.override # noqa: F401\n\nimport distutils.core\nfrom distutils.errors import DistutilsOptionError\n\nfrom ._deprecation_warning import SetuptoolsDeprecationWarning\n\nimport setuptools.version\nfrom setuptools.extension import Extension\nfrom setuptools.dist import Distribution\nfrom setuptools.depends import Require\nfrom setuptools.discovery import PackageFinder, PEP420PackageFinder\nfrom . import monkey\nfrom . 
import logging\n\n\n__all__ = [\n 'setup',\n 'Distribution',\n 'Command',\n 'Extension',\n 'Require',\n 'SetuptoolsDeprecationWarning',\n 'find_packages',\n 'find_namespace_packages',\n]\n\n__version__ = setuptools.version.__version__\n\nbootstrap_install_from = None\n\n\nfind_packages = PackageFinder.find\nfind_namespace_packages = PEP420PackageFinder.find\n\n\ndef _install_setup_requires(attrs):\n # Note: do not use `setuptools.Distribution` directly, as\n # our PEP 517 backend patch `distutils.core.Distribution`.\n class MinimalDistribution(distutils.core.Distribution):\n \"\"\"\n A minimal version of a distribution for supporting the\n fetch_build_eggs interface.\n \"\"\"\n\n def __init__(self, attrs):\n _incl = 'dependency_links', 'setup_requires'\n filtered = {k: attrs[k] for k in set(_incl) & set(attrs)}\n super().__init__(filtered)\n # Prevent accidentally triggering discovery with incomplete set of attrs\n self.set_defaults._disable()\n\n def finalize_options(self):\n \"\"\"\n Disable finalize_options to avoid building the working set.\n Ref #2158.\n \"\"\"\n\n dist = MinimalDistribution(attrs)\n\n # Honor setup.cfg's options.\n dist.parse_config_files(ignore_option_errors=True)\n if dist.setup_requires:\n dist.fetch_build_eggs(dist.setup_requires)\n\n\ndef setup(**attrs):\n # Make sure we have any requirements needed to interpret 'attrs'.\n logging.configure()\n _install_setup_requires(attrs)\n return distutils.core.setup(**attrs)\n\n\nsetup.__doc__ = distutils.core.setup.__doc__\n\n\n_Command = monkey.get_unpatched(distutils.core.Command)\n\n\nclass Command(_Command):\n __doc__ = _Command.__doc__\n\n command_consumes_arguments = False\n\n def __init__(self, dist, **kw):\n \"\"\"\n Construct the command for dist, updating\n vars(self) with any keyword parameters.\n \"\"\"\n super().__init__(dist)\n vars(self).update(kw)\n\n def _ensure_stringlike(self, option, what, default=None):\n val = getattr(self, option)\n if val is None:\n setattr(self, option, default)\n return default\n elif not isinstance(val, str):\n raise DistutilsOptionError(\n \"'%s' must be a %s (got `%s`)\" % (option, what, val)\n )\n return val\n\n def ensure_string_list(self, option):\n r\"\"\"Ensure that 'option' is a list of strings. 
If 'option' is\n currently a string, we split it either on /,\\s*/ or /\\s+/, so\n \"foo bar baz\", \"foo,bar,baz\", and \"foo, bar baz\" all become\n [\"foo\", \"bar\", \"baz\"].\n \"\"\"\n val = getattr(self, option)\n if val is None:\n return\n elif isinstance(val, str):\n setattr(self, option, re.split(r',\\s*|\\s+', val))\n else:\n if isinstance(val, list):\n ok = all(isinstance(v, str) for v in val)\n else:\n ok = False\n if not ok:\n raise DistutilsOptionError(\n \"'%s' must be a list of strings (got %r)\" % (option, val)\n )\n\n def reinitialize_command(self, command, reinit_subcommands=0, **kw):\n cmd = _Command.reinitialize_command(self, command, reinit_subcommands)\n vars(cmd).update(kw)\n return cmd\n\n\ndef _find_all_simple(path):\n \"\"\"\n Find all files under 'path'\n \"\"\"\n results = (\n os.path.join(base, file)\n for base, dirs, files in os.walk(path, followlinks=True)\n for file in files\n )\n return filter(os.path.isfile, results)\n\n\ndef findall(dir=os.curdir):\n \"\"\"\n Find all files under 'dir' and return the list of full filenames.\n Unless dir is '.', return full filenames with dir prepended.\n \"\"\"\n files = _find_all_simple(dir)\n if dir == os.curdir:\n make_rel = functools.partial(os.path.relpath, start=dir)\n files = map(make_rel, files)\n return list(files)\n\n\nclass sic(str):\n \"\"\"Treat this string as-is (https://en.wikipedia.org/wiki/Sic)\"\"\"\n\n\n# Apply monkey patches\nmonkey.patch_all()\n", "path": "setuptools/__init__.py"}], "after_files": [{"content": "\"\"\"Extensions to the 'distutils' for large or complex distributions\"\"\"\n\nimport functools\nimport os\nimport re\nimport warnings\n\nimport _distutils_hack.override # noqa: F401\n\nimport distutils.core\nfrom distutils.errors import DistutilsOptionError\nfrom distutils.util import convert_path as _convert_path\n\nfrom ._deprecation_warning import SetuptoolsDeprecationWarning\n\nimport setuptools.version\nfrom setuptools.extension import Extension\nfrom setuptools.dist import Distribution\nfrom setuptools.depends import Require\nfrom setuptools.discovery import PackageFinder, PEP420PackageFinder\nfrom . import monkey\nfrom . 
import logging\n\n\n__all__ = [\n 'setup',\n 'Distribution',\n 'Command',\n 'Extension',\n 'Require',\n 'SetuptoolsDeprecationWarning',\n 'find_packages',\n 'find_namespace_packages',\n]\n\n__version__ = setuptools.version.__version__\n\nbootstrap_install_from = None\n\n\nfind_packages = PackageFinder.find\nfind_namespace_packages = PEP420PackageFinder.find\n\n\ndef _install_setup_requires(attrs):\n # Note: do not use `setuptools.Distribution` directly, as\n # our PEP 517 backend patch `distutils.core.Distribution`.\n class MinimalDistribution(distutils.core.Distribution):\n \"\"\"\n A minimal version of a distribution for supporting the\n fetch_build_eggs interface.\n \"\"\"\n\n def __init__(self, attrs):\n _incl = 'dependency_links', 'setup_requires'\n filtered = {k: attrs[k] for k in set(_incl) & set(attrs)}\n super().__init__(filtered)\n # Prevent accidentally triggering discovery with incomplete set of attrs\n self.set_defaults._disable()\n\n def finalize_options(self):\n \"\"\"\n Disable finalize_options to avoid building the working set.\n Ref #2158.\n \"\"\"\n\n dist = MinimalDistribution(attrs)\n\n # Honor setup.cfg's options.\n dist.parse_config_files(ignore_option_errors=True)\n if dist.setup_requires:\n dist.fetch_build_eggs(dist.setup_requires)\n\n\ndef setup(**attrs):\n # Make sure we have any requirements needed to interpret 'attrs'.\n logging.configure()\n _install_setup_requires(attrs)\n return distutils.core.setup(**attrs)\n\n\nsetup.__doc__ = distutils.core.setup.__doc__\n\n\n_Command = monkey.get_unpatched(distutils.core.Command)\n\n\nclass Command(_Command):\n __doc__ = _Command.__doc__\n\n command_consumes_arguments = False\n\n def __init__(self, dist, **kw):\n \"\"\"\n Construct the command for dist, updating\n vars(self) with any keyword parameters.\n \"\"\"\n super().__init__(dist)\n vars(self).update(kw)\n\n def _ensure_stringlike(self, option, what, default=None):\n val = getattr(self, option)\n if val is None:\n setattr(self, option, default)\n return default\n elif not isinstance(val, str):\n raise DistutilsOptionError(\n \"'%s' must be a %s (got `%s`)\" % (option, what, val)\n )\n return val\n\n def ensure_string_list(self, option):\n r\"\"\"Ensure that 'option' is a list of strings. 
If 'option' is\n currently a string, we split it either on /,\\s*/ or /\\s+/, so\n \"foo bar baz\", \"foo,bar,baz\", and \"foo, bar baz\" all become\n [\"foo\", \"bar\", \"baz\"].\n \"\"\"\n val = getattr(self, option)\n if val is None:\n return\n elif isinstance(val, str):\n setattr(self, option, re.split(r',\\s*|\\s+', val))\n else:\n if isinstance(val, list):\n ok = all(isinstance(v, str) for v in val)\n else:\n ok = False\n if not ok:\n raise DistutilsOptionError(\n \"'%s' must be a list of strings (got %r)\" % (option, val)\n )\n\n def reinitialize_command(self, command, reinit_subcommands=0, **kw):\n cmd = _Command.reinitialize_command(self, command, reinit_subcommands)\n vars(cmd).update(kw)\n return cmd\n\n\ndef _find_all_simple(path):\n \"\"\"\n Find all files under 'path'\n \"\"\"\n results = (\n os.path.join(base, file)\n for base, dirs, files in os.walk(path, followlinks=True)\n for file in files\n )\n return filter(os.path.isfile, results)\n\n\ndef findall(dir=os.curdir):\n \"\"\"\n Find all files under 'dir' and return the list of full filenames.\n Unless dir is '.', return full filenames with dir prepended.\n \"\"\"\n files = _find_all_simple(dir)\n if dir == os.curdir:\n make_rel = functools.partial(os.path.relpath, start=dir)\n files = map(make_rel, files)\n return list(files)\n\n\n@functools.wraps(_convert_path)\ndef convert_path(pathname):\n from inspect import cleandoc\n\n msg = \"\"\"\n The function `convert_path` is considered internal and not part of the public API.\n Its direct usage by 3rd-party packages is considered deprecated and the function\n may be removed in the future.\n \"\"\"\n warnings.warn(cleandoc(msg), SetuptoolsDeprecationWarning)\n return _convert_path(pathname)\n\n\nclass sic(str):\n \"\"\"Treat this string as-is (https://en.wikipedia.org/wiki/Sic)\"\"\"\n\n\n# Apply monkey patches\nmonkey.patch_all()\n", "path": "setuptools/__init__.py"}]}
num_tokens: 2,014
num_tokens_diff: 272

problem_id: gh_patches_debug_1103
source: rasdani/github-patches
task_type: git_diff
in_source_id: Pylons__pyramid-2226
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update to Sphinx 1.3.4 when released There is a [bug in Sphinx 1.3.3 and 1.3.1](https://github.com/sphinx-doc/sphinx/issues/2189) (I haven't tried 1.3.2) where next and previous links in Sphinx documentation are broken when going into children and across sibling directories. When 1.3.4 is released, we need to pin sphinx to 1.3.4, which will include the commit made 8 days after the 1.3.3 release. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 ############################################################################## 2 # 3 # Copyright (c) 2008-2013 Agendaless Consulting and Contributors. 4 # All Rights Reserved. 5 # 6 # This software is subject to the provisions of the BSD-like license at 7 # http://www.repoze.org/LICENSE.txt. A copy of the license should accompany 8 # this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL 9 # EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, 10 # THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND 11 # FITNESS FOR A PARTICULAR PURPOSE 12 # 13 ############################################################################## 14 15 import os 16 import sys 17 18 from setuptools import setup, find_packages 19 20 py_version = sys.version_info[:2] 21 22 PY3 = py_version[0] == 3 23 24 if PY3: 25 if py_version < (3, 2): 26 raise RuntimeError('On Python 3, Pyramid requires Python 3.2 or better') 27 else: 28 if py_version < (2, 6): 29 raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better') 30 31 here = os.path.abspath(os.path.dirname(__file__)) 32 try: 33 with open(os.path.join(here, 'README.rst')) as f: 34 README = f.read() 35 with open(os.path.join(here, 'CHANGES.txt')) as f: 36 CHANGES = f.read() 37 except IOError: 38 README = CHANGES = '' 39 40 install_requires=[ 41 'setuptools', 42 'WebOb >= 1.3.1', # request.domain and CookieProfile 43 'repoze.lru >= 0.4', # py3 compat 44 'zope.interface >= 3.8.0', # has zope.interface.registry 45 'zope.deprecation >= 3.5.0', # py3 compat 46 'venusian >= 1.0a3', # ``ignore`` 47 'translationstring >= 0.4', # py3 compat 48 'PasteDeploy >= 1.5.0', # py3 compat 49 ] 50 51 tests_require = [ 52 'WebTest >= 1.3.1', # py3 compat 53 ] 54 55 if not PY3: 56 tests_require.append('zope.component>=3.11.0') 57 58 docs_extras = [ 59 'Sphinx >= 1.3.1', 60 'docutils', 61 'repoze.sphinx.autointerface', 62 'pylons_sphinx_latesturl', 63 'pylons-sphinx-themes', 64 'sphinxcontrib-programoutput', 65 ] 66 67 testing_extras = tests_require + [ 68 'nose', 69 'coverage', 70 'virtualenv', # for scaffolding tests 71 ] 72 73 setup(name='pyramid', 74 version='1.5.8', 75 description='The Pyramid Web Framework, a Pylons project', 76 long_description=README + '\n\n' + CHANGES, 77 classifiers=[ 78 "Intended Audience :: Developers", 79 "Programming Language :: Python", 80 "Programming Language :: Python :: 2.6", 81 "Programming Language :: Python :: 2.7", 82 "Programming Language :: Python :: 3", 83 "Programming Language :: Python :: 3.2", 84 "Programming Language :: Python :: 3.3", 85 "Programming Language :: Python :: 3.4", 86 "Programming Language :: Python :: 3.5", 87 "Programming Language :: Python :: Implementation :: CPython", 88 "Programming Language :: Python :: Implementation :: PyPy", 89 "Framework :: Pyramid", 90 "Topic :: Internet :: 
WWW/HTTP", 91 "Topic :: Internet :: WWW/HTTP :: WSGI", 92 "License :: Repoze Public License", 93 ], 94 keywords='web wsgi pylons pyramid', 95 author="Chris McDonough, Agendaless Consulting", 96 author_email="pylons-discuss@googlegroups.com", 97 url="http://docs.pylonsproject.org/en/latest/docs/pyramid.html", 98 license="BSD-derived (http://www.repoze.org/LICENSE.txt)", 99 packages=find_packages(), 100 include_package_data=True, 101 zip_safe=False, 102 install_requires = install_requires, 103 extras_require = { 104 'testing':testing_extras, 105 'docs':docs_extras, 106 }, 107 tests_require = tests_require, 108 test_suite="pyramid.tests", 109 entry_points = """\ 110 [pyramid.scaffold] 111 starter=pyramid.scaffolds:StarterProjectTemplate 112 zodb=pyramid.scaffolds:ZODBProjectTemplate 113 alchemy=pyramid.scaffolds:AlchemyProjectTemplate 114 [console_scripts] 115 pcreate = pyramid.scripts.pcreate:main 116 pserve = pyramid.scripts.pserve:main 117 pshell = pyramid.scripts.pshell:main 118 proutes = pyramid.scripts.proutes:main 119 pviews = pyramid.scripts.pviews:main 120 ptweens = pyramid.scripts.ptweens:main 121 prequest = pyramid.scripts.prequest:main 122 pdistreport = pyramid.scripts.pdistreport:main 123 [paste.server_runner] 124 wsgiref = pyramid.scripts.pserve:wsgiref_server_runner 125 cherrypy = pyramid.scripts.pserve:cherrypy_server_runner 126 """ 127 ) 128 129 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -56,7 +56,7 @@ tests_require.append('zope.component>=3.11.0') docs_extras = [ - 'Sphinx >= 1.3.1', + 'Sphinx >= 1.3.4', 'docutils', 'repoze.sphinx.autointerface', 'pylons_sphinx_latesturl',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -56,7 +56,7 @@\n tests_require.append('zope.component>=3.11.0')\n \n docs_extras = [\n- 'Sphinx >= 1.3.1',\n+ 'Sphinx >= 1.3.4',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n", "issue": "Update to Sphinx 1.3.4 when released\nThere is a [bug in Sphinx 1.3.3 and 1.3.1](https://github.com/sphinx-doc/sphinx/issues/2189) (I haven't tried 1.3.2) where next and previous links in Sphinx documentation are broken when going into children and across sibling directories.\n\nWhen 1.3.4 is released, we need to pin sphinx to 1.3.4, which will include the commit made 8 days after the 1.3.3 release.\n\n", "before_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\npy_version = sys.version_info[:2]\n\nPY3 = py_version[0] == 3\n\nif PY3:\n if py_version < (3, 2):\n raise RuntimeError('On Python 3, Pyramid requires Python 3.2 or better')\nelse:\n if py_version < (2, 6):\n raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires=[\n 'setuptools',\n 'WebOb >= 1.3.1', # request.domain and CookieProfile\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n ]\n\nif not PY3:\n tests_require.append('zope.component>=3.11.0')\n\ndocs_extras = [\n 'Sphinx >= 1.3.1',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-programoutput',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.5.8',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic 
:: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"pylons-discuss@googlegroups.com\",\n url=\"http://docs.pylonsproject.org/en/latest/docs/pyramid.html\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires = install_requires,\n extras_require = {\n 'testing':testing_extras,\n 'docs':docs_extras,\n },\n tests_require = tests_require,\n test_suite=\"pyramid.tests\",\n entry_points = \"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n\n", "path": "setup.py"}], "after_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\npy_version = sys.version_info[:2]\n\nPY3 = py_version[0] == 3\n\nif PY3:\n if py_version < (3, 2):\n raise RuntimeError('On Python 3, Pyramid requires Python 3.2 or better')\nelse:\n if py_version < (2, 6):\n raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires=[\n 'setuptools',\n 'WebOb >= 1.3.1', # request.domain and CookieProfile\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n ]\n\nif not PY3:\n tests_require.append('zope.component>=3.11.0')\n\ndocs_extras = [\n 'Sphinx >= 1.3.4',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-programoutput',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.5.8',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + 
CHANGES,\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"pylons-discuss@googlegroups.com\",\n url=\"http://docs.pylonsproject.org/en/latest/docs/pyramid.html\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires = install_requires,\n extras_require = {\n 'testing':testing_extras,\n 'docs':docs_extras,\n },\n tests_require = tests_require,\n test_suite=\"pyramid.tests\",\n entry_points = \"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n\n", "path": "setup.py"}]}
1,782
106
gh_patches_debug_25558
rasdani/github-patches
git_diff
interlegis__sapl-2525
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Sem acesso anônimo de Documento Acessório de Audiencia Pública <!--- Forneça um resumo geral da _issue_ no título acima --> ## Comportamento Esperado Usuários anônimos poderem acessar documento acessório das Audiências Públicas ## Comportamento Atual Usuário anônimo não acessa a parte de "Documento Acessório" da Audiência Pública pedindo um login com a aplicação de documento administrativo "Restritiva", acredito que Audiência Pública não deveria se enquadrar nessa regra. ## Passos para Reproduzir (para bugs) <!--- Forneça um link para um exemplo, ou um conjunto de passos inequívocos --> <!--- para reproduzir esse bug. Inclua código para reproduzir, se relevante. --> 1. Com opção de Visibilidade de Documentos Administrativos "Restritiva" -Está deslogado - Institucional - Audiências Públicas - Acessar uma audiência cadastrada - clicar em Documento Acessório <!-- ## Imagens do Ocorrido --> ## Seu Ambiente <!--- Inclua detalhes relevantes sobre o ambiente em que você presenciou/experienciou o bug. --> * Versão usada (_Release_): 3.1.143 * Nome e versão do navegador: Chrome * Nome e versão do Sistema Operacional (desktop ou mobile): Windows 10 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sapl/audiencia/views.py` Content: ``` 1 import sapl 2 3 from django.http import HttpResponse 4 from django.core.urlresolvers import reverse 5 from django.views.decorators.clickjacking import xframe_options_exempt 6 from django.views.generic import UpdateView 7 from sapl.crud.base import RP_DETAIL, RP_LIST, Crud, MasterDetailCrud 8 9 from .forms import AudienciaForm, AnexoAudienciaPublicaForm 10 from .models import AudienciaPublica, AnexoAudienciaPublica 11 12 13 def index(request): 14 return HttpResponse("Audiência Pública") 15 16 17 class AudienciaCrud(Crud): 18 model = AudienciaPublica 19 public = [RP_LIST, RP_DETAIL, ] 20 21 class BaseMixin(Crud.BaseMixin): 22 list_field_names = ['numero', 'nome', 'tipo', 'materia', 23 'data'] 24 ordering = 'nome', 'numero', 'tipo', 'data' 25 26 class ListView(Crud.ListView): 27 paginate_by = 10 28 29 def get_context_data(self, **kwargs): 30 context = super().get_context_data(**kwargs) 31 32 audiencia_materia = {} 33 for o in context['object_list']: 34 # indexado pelo numero da audiencia 35 audiencia_materia[str(o.numero)] = o.materia 36 37 for row in context['rows']: 38 coluna_materia = row[3] # se mudar a ordem de listagem mudar aqui 39 if coluna_materia[0]: 40 materia = audiencia_materia[row[0][0]] 41 url_materia = reverse('sapl.materia:materialegislativa_detail', 42 kwargs={'pk': materia.id}) 43 row[3] = (coluna_materia[0], url_materia) 44 return context 45 46 class CreateView(Crud.CreateView): 47 form_class = AudienciaForm 48 49 def form_valid(self, form): 50 return super(Crud.CreateView, self).form_valid(form) 51 52 class UpdateView(Crud.UpdateView): 53 form_class = AudienciaForm 54 55 def get_initial(self): 56 initial = super(UpdateView, self).get_initial() 57 if self.object.materia: 58 initial['tipo_materia'] = self.object.materia.tipo.id 59 initial['numero_materia'] = self.object.materia.numero 60 initial['ano_materia'] = self.object.materia.ano 61 return initial 62 63 class DeleteView(Crud.DeleteView): 64 pass 65 66 class DetailView(Crud.DetailView): 67 68 layout_key = 'AudienciaPublicaDetail' 69 70 @xframe_options_exempt 71 def get(self, request, *args, 
**kwargs): 72 return super().get(request, *args, **kwargs) 73 74 75 class AudienciaPublicaMixin: 76 77 def has_permission(self): 78 app_config = sapl.base.models.AppConfig.objects.last() 79 if app_config and app_config.documentos_administrativos == 'O': 80 return True 81 82 return super().has_permission() 83 84 85 class AnexoAudienciaPublicaCrud(MasterDetailCrud): 86 model = AnexoAudienciaPublica 87 parent_field = 'audiencia' 88 help_topic = 'numeracao_docsacess' 89 90 class BaseMixin(MasterDetailCrud.BaseMixin): 91 list_field_names = ['assunto'] 92 93 class CreateView(MasterDetailCrud.CreateView): 94 form_class = AnexoAudienciaPublicaForm 95 layout_key = None 96 97 class UpdateView(MasterDetailCrud.UpdateView): 98 form_class = AnexoAudienciaPublicaForm 99 100 class ListView(AudienciaPublicaMixin, MasterDetailCrud.ListView): 101 102 def get_queryset(self): 103 qs = super(MasterDetailCrud.ListView, self).get_queryset() 104 kwargs = {self.crud.parent_field: self.kwargs['pk']} 105 return qs.filter(**kwargs).order_by('-data', '-id') 106 107 class DetailView(AudienciaPublicaMixin, 108 MasterDetailCrud.DetailView): 109 pass 110 ``` Path: `sapl/audiencia/urls.py` Content: ``` 1 from django.conf.urls import include, url 2 from sapl.audiencia.views import (index, AudienciaCrud,AnexoAudienciaPublicaCrud) 3 4 from .apps import AppConfig 5 6 app_name = AppConfig.name 7 8 urlpatterns = [ 9 url(r'^audiencia/', include(AudienciaCrud.get_urls() + 10 AnexoAudienciaPublicaCrud.get_urls())), 11 ] ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sapl/audiencia/urls.py b/sapl/audiencia/urls.py --- a/sapl/audiencia/urls.py +++ b/sapl/audiencia/urls.py @@ -1,11 +1,10 @@ from django.conf.urls import include, url -from sapl.audiencia.views import (index, AudienciaCrud,AnexoAudienciaPublicaCrud) +from sapl.audiencia.views import (index, AudienciaCrud, AnexoAudienciaPublicaCrud) from .apps import AppConfig app_name = AppConfig.name urlpatterns = [ - url(r'^audiencia/', include(AudienciaCrud.get_urls() + - AnexoAudienciaPublicaCrud.get_urls())), + url(r'^audiencia/', include(AudienciaCrud.get_urls() + AnexoAudienciaPublicaCrud.get_urls())), ] \ No newline at end of file diff --git a/sapl/audiencia/views.py b/sapl/audiencia/views.py --- a/sapl/audiencia/views.py +++ b/sapl/audiencia/views.py @@ -86,6 +86,7 @@ model = AnexoAudienciaPublica parent_field = 'audiencia' help_topic = 'numeracao_docsacess' + public = [RP_LIST, RP_DETAIL, ] class BaseMixin(MasterDetailCrud.BaseMixin): list_field_names = ['assunto'] @@ -104,7 +105,5 @@ kwargs = {self.crud.parent_field: self.kwargs['pk']} return qs.filter(**kwargs).order_by('-data', '-id') - class DetailView(AudienciaPublicaMixin, - MasterDetailCrud.DetailView): + class DetailView(AudienciaPublicaMixin, MasterDetailCrud.DetailView): pass - \ No newline at end of file
{"golden_diff": "diff --git a/sapl/audiencia/urls.py b/sapl/audiencia/urls.py\n--- a/sapl/audiencia/urls.py\n+++ b/sapl/audiencia/urls.py\n@@ -1,11 +1,10 @@\n from django.conf.urls import include, url\n-from sapl.audiencia.views import (index, AudienciaCrud,AnexoAudienciaPublicaCrud)\n+from sapl.audiencia.views import (index, AudienciaCrud, AnexoAudienciaPublicaCrud)\n \n from .apps import AppConfig\n \n app_name = AppConfig.name\n \n urlpatterns = [\n- url(r'^audiencia/', include(AudienciaCrud.get_urls() +\n- \t\t\t\t\t\t\tAnexoAudienciaPublicaCrud.get_urls())),\n+ url(r'^audiencia/', include(AudienciaCrud.get_urls() + AnexoAudienciaPublicaCrud.get_urls())),\n ]\n\\ No newline at end of file\ndiff --git a/sapl/audiencia/views.py b/sapl/audiencia/views.py\n--- a/sapl/audiencia/views.py\n+++ b/sapl/audiencia/views.py\n@@ -86,6 +86,7 @@\n model = AnexoAudienciaPublica\n parent_field = 'audiencia'\n help_topic = 'numeracao_docsacess'\n+ public = [RP_LIST, RP_DETAIL, ]\n \n class BaseMixin(MasterDetailCrud.BaseMixin):\n list_field_names = ['assunto']\n@@ -104,7 +105,5 @@\n kwargs = {self.crud.parent_field: self.kwargs['pk']}\n return qs.filter(**kwargs).order_by('-data', '-id')\n \n- class DetailView(AudienciaPublicaMixin,\n- MasterDetailCrud.DetailView):\n+ class DetailView(AudienciaPublicaMixin, MasterDetailCrud.DetailView):\n pass\n- \n\\ No newline at end of file\n", "issue": "Sem acesso an\u00f4nimo de Documento Acess\u00f3rio de Audiencia P\u00fablica \n<!--- Forne\u00e7a um resumo geral da _issue_ no t\u00edtulo acima -->\r\n\r\n## Comportamento Esperado\r\nUsu\u00e1rios an\u00f4nimos poderem acessar documento acess\u00f3rio das Audi\u00eancias P\u00fablicas \r\n\r\n## Comportamento Atual\r\nUsu\u00e1rio an\u00f4nimo n\u00e3o acessa a parte de \"Documento Acess\u00f3rio\" da Audi\u00eancia P\u00fablica pedindo um login com a aplica\u00e7\u00e3o de documento administrativo \"Restritiva\", acredito que Audi\u00eancia P\u00fablica n\u00e3o deveria se enquadrar nessa regra.\r\n\r\n## Passos para Reproduzir (para bugs)\r\n<!--- Forne\u00e7a um link para um exemplo, ou um conjunto de passos inequ\u00edvocos -->\r\n<!--- para reproduzir esse bug. Inclua c\u00f3digo para reproduzir, se relevante. -->\r\n1. Com op\u00e7\u00e3o de Visibilidade de Documentos Administrativos \"Restritiva\" -Est\u00e1 deslogado - Institucional - Audi\u00eancias P\u00fablicas - Acessar uma audi\u00eancia cadastrada - clicar em Documento Acess\u00f3rio\r\n\r\n<!-- ## Imagens do Ocorrido -->\r\n\r\n## Seu Ambiente\r\n<!--- Inclua detalhes relevantes sobre o ambiente em que voc\u00ea presenciou/experienciou o bug. 
-->\r\n* Vers\u00e3o usada (_Release_): 3.1.143\r\n* Nome e vers\u00e3o do navegador: Chrome\r\n* Nome e vers\u00e3o do Sistema Operacional (desktop ou mobile): Windows 10\r\n\n", "before_files": [{"content": "import sapl\n\nfrom django.http import HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.views.generic import UpdateView\nfrom sapl.crud.base import RP_DETAIL, RP_LIST, Crud, MasterDetailCrud\n\nfrom .forms import AudienciaForm, AnexoAudienciaPublicaForm\nfrom .models import AudienciaPublica, AnexoAudienciaPublica\n\n\ndef index(request):\n return HttpResponse(\"Audi\u00eancia P\u00fablica\")\n\n\nclass AudienciaCrud(Crud):\n model = AudienciaPublica\n public = [RP_LIST, RP_DETAIL, ]\n\n class BaseMixin(Crud.BaseMixin):\n list_field_names = ['numero', 'nome', 'tipo', 'materia',\n 'data'] \n ordering = 'nome', 'numero', 'tipo', 'data'\n\n class ListView(Crud.ListView):\n paginate_by = 10\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n audiencia_materia = {}\n for o in context['object_list']:\n # indexado pelo numero da audiencia\n audiencia_materia[str(o.numero)] = o.materia\n\n for row in context['rows']:\n coluna_materia = row[3] # se mudar a ordem de listagem mudar aqui\n if coluna_materia[0]:\n materia = audiencia_materia[row[0][0]]\n url_materia = reverse('sapl.materia:materialegislativa_detail',\n kwargs={'pk': materia.id})\n row[3] = (coluna_materia[0], url_materia)\n return context\n\n class CreateView(Crud.CreateView):\n form_class = AudienciaForm\n\n def form_valid(self, form):\n return super(Crud.CreateView, self).form_valid(form)\n\n class UpdateView(Crud.UpdateView):\n form_class = AudienciaForm\n\n def get_initial(self):\n initial = super(UpdateView, self).get_initial()\n if self.object.materia:\n initial['tipo_materia'] = self.object.materia.tipo.id\n initial['numero_materia'] = self.object.materia.numero\n initial['ano_materia'] = self.object.materia.ano\n return initial\n \n class DeleteView(Crud.DeleteView):\n pass\n\n class DetailView(Crud.DetailView):\n\n layout_key = 'AudienciaPublicaDetail'\n\n @xframe_options_exempt\n def get(self, request, *args, **kwargs):\n return super().get(request, *args, **kwargs)\n\n\nclass AudienciaPublicaMixin:\n\n def has_permission(self):\n app_config = sapl.base.models.AppConfig.objects.last()\n if app_config and app_config.documentos_administrativos == 'O':\n return True\n\n return super().has_permission()\n\n\nclass AnexoAudienciaPublicaCrud(MasterDetailCrud):\n model = AnexoAudienciaPublica\n parent_field = 'audiencia'\n help_topic = 'numeracao_docsacess'\n\n class BaseMixin(MasterDetailCrud.BaseMixin):\n list_field_names = ['assunto']\n\n class CreateView(MasterDetailCrud.CreateView):\n form_class = AnexoAudienciaPublicaForm\n layout_key = None\n\n class UpdateView(MasterDetailCrud.UpdateView):\n form_class = AnexoAudienciaPublicaForm\n\n class ListView(AudienciaPublicaMixin, MasterDetailCrud.ListView):\n\n def get_queryset(self):\n qs = super(MasterDetailCrud.ListView, self).get_queryset()\n kwargs = {self.crud.parent_field: self.kwargs['pk']}\n return qs.filter(**kwargs).order_by('-data', '-id')\n\n class DetailView(AudienciaPublicaMixin,\n MasterDetailCrud.DetailView):\n pass\n ", "path": "sapl/audiencia/views.py"}, {"content": "from django.conf.urls import include, url\nfrom sapl.audiencia.views import (index, AudienciaCrud,AnexoAudienciaPublicaCrud)\n\nfrom .apps import 
AppConfig\n\napp_name = AppConfig.name\n\nurlpatterns = [\n url(r'^audiencia/', include(AudienciaCrud.get_urls() +\n \t\t\t\t\t\t\tAnexoAudienciaPublicaCrud.get_urls())),\n]", "path": "sapl/audiencia/urls.py"}], "after_files": [{"content": "import sapl\n\nfrom django.http import HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.views.generic import UpdateView\nfrom sapl.crud.base import RP_DETAIL, RP_LIST, Crud, MasterDetailCrud\n\nfrom .forms import AudienciaForm, AnexoAudienciaPublicaForm\nfrom .models import AudienciaPublica, AnexoAudienciaPublica\n\n\ndef index(request):\n return HttpResponse(\"Audi\u00eancia P\u00fablica\")\n\n\nclass AudienciaCrud(Crud):\n model = AudienciaPublica\n public = [RP_LIST, RP_DETAIL, ]\n\n class BaseMixin(Crud.BaseMixin):\n list_field_names = ['numero', 'nome', 'tipo', 'materia',\n 'data'] \n ordering = 'nome', 'numero', 'tipo', 'data'\n\n class ListView(Crud.ListView):\n paginate_by = 10\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n audiencia_materia = {}\n for o in context['object_list']:\n # indexado pelo numero da audiencia\n audiencia_materia[str(o.numero)] = o.materia\n\n for row in context['rows']:\n coluna_materia = row[3] # se mudar a ordem de listagem mudar aqui\n if coluna_materia[0]:\n materia = audiencia_materia[row[0][0]]\n url_materia = reverse('sapl.materia:materialegislativa_detail',\n kwargs={'pk': materia.id})\n row[3] = (coluna_materia[0], url_materia)\n return context\n\n class CreateView(Crud.CreateView):\n form_class = AudienciaForm\n\n def form_valid(self, form):\n return super(Crud.CreateView, self).form_valid(form)\n\n class UpdateView(Crud.UpdateView):\n form_class = AudienciaForm\n\n def get_initial(self):\n initial = super(UpdateView, self).get_initial()\n if self.object.materia:\n initial['tipo_materia'] = self.object.materia.tipo.id\n initial['numero_materia'] = self.object.materia.numero\n initial['ano_materia'] = self.object.materia.ano\n return initial\n \n class DeleteView(Crud.DeleteView):\n pass\n\n class DetailView(Crud.DetailView):\n\n layout_key = 'AudienciaPublicaDetail'\n\n @xframe_options_exempt\n def get(self, request, *args, **kwargs):\n return super().get(request, *args, **kwargs)\n\n\nclass AudienciaPublicaMixin:\n\n def has_permission(self):\n app_config = sapl.base.models.AppConfig.objects.last()\n if app_config and app_config.documentos_administrativos == 'O':\n return True\n\n return super().has_permission()\n\n\nclass AnexoAudienciaPublicaCrud(MasterDetailCrud):\n model = AnexoAudienciaPublica\n parent_field = 'audiencia'\n help_topic = 'numeracao_docsacess'\n public = [RP_LIST, RP_DETAIL, ]\n\n class BaseMixin(MasterDetailCrud.BaseMixin):\n list_field_names = ['assunto']\n\n class CreateView(MasterDetailCrud.CreateView):\n form_class = AnexoAudienciaPublicaForm\n layout_key = None\n\n class UpdateView(MasterDetailCrud.UpdateView):\n form_class = AnexoAudienciaPublicaForm\n\n class ListView(AudienciaPublicaMixin, MasterDetailCrud.ListView):\n\n def get_queryset(self):\n qs = super(MasterDetailCrud.ListView, self).get_queryset()\n kwargs = {self.crud.parent_field: self.kwargs['pk']}\n return qs.filter(**kwargs).order_by('-data', '-id')\n\n class DetailView(AudienciaPublicaMixin, MasterDetailCrud.DetailView):\n pass\n", "path": "sapl/audiencia/views.py"}, {"content": "from django.conf.urls import include, url\nfrom sapl.audiencia.views import (index, AudienciaCrud, 
AnexoAudienciaPublicaCrud)\n\nfrom .apps import AppConfig\n\napp_name = AppConfig.name\n\nurlpatterns = [\n url(r'^audiencia/', include(AudienciaCrud.get_urls() + AnexoAudienciaPublicaCrud.get_urls())),\n]", "path": "sapl/audiencia/urls.py"}]}
1,761
410
gh_patches_debug_23485
rasdani/github-patches
git_diff
mindsdb__mindsdb-1954
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- selecting from datasource error Repoted by David F. ``` use Postgres_Sample; SELECT * FROM data.insurance LIMIT 200; ``` error: ``` SQL Error [1149] [42000]: 'str' object has no attribute '__name__' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py` Content: ``` 1 import pandas as pd 2 from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender 3 4 from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode 5 from mindsdb.utilities.log import log 6 7 8 class IntegrationDataNode(DataNode): 9 type = 'integration' 10 11 def __init__(self, integration_name, data_store, ds_type): 12 self.integration_name = integration_name 13 self.data_store = data_store 14 self.ds_type = ds_type 15 16 def get_type(self): 17 return self.type 18 19 def get_tables(self): 20 return [] 21 22 def has_table(self, tableName): 23 return True 24 25 def get_table_columns(self, tableName): 26 return [] 27 28 def select(self, query): 29 if self.ds_type in ('postgres', 'snowflake'): 30 dialect = 'postgres' 31 else: 32 dialect = 'mysql' 33 render = SqlalchemyRender(dialect) 34 try: 35 query_str = render.get_string(query, with_failback=False) 36 except Exception as e: 37 log.error(f"Exception during query casting to '{dialect}' dialect. Query: {query}. Error: {e}") 38 query_str = render.get_string(query, with_failback=True) 39 40 dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': query_str}) 41 data = dso.df.to_dict(orient='records') 42 column_names = list(dso.df.columns) 43 44 for column_name in column_names: 45 if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]): 46 pass_data = dso.df[column_name].dt.to_pydatetime() 47 for i, rec in enumerate(data): 48 rec[column_name] = pass_data[i].timestamp() 49 50 if len(column_names) == 0: 51 column_names = ['dataframe_is_empty'] 52 53 return data, column_names 54 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py --- a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py +++ b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py @@ -26,16 +26,19 @@ return [] def select(self, query): - if self.ds_type in ('postgres', 'snowflake'): - dialect = 'postgres' + if isinstance(query, str): + query_str = query else: - dialect = 'mysql' - render = SqlalchemyRender(dialect) - try: - query_str = render.get_string(query, with_failback=False) - except Exception as e: - log.error(f"Exception during query casting to '{dialect}' dialect. Query: {query}. Error: {e}") - query_str = render.get_string(query, with_failback=True) + if self.ds_type in ('postgres', 'snowflake'): + dialect = 'postgres' + else: + dialect = 'mysql' + render = SqlalchemyRender(dialect) + try: + query_str = render.get_string(query, with_failback=False) + except Exception as e: + log.error(f"Exception during query casting to '{dialect}' dialect. Query: {query}. Error: {e}") + query_str = render.get_string(query, with_failback=True) dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': query_str}) data = dso.df.to_dict(orient='records')
{"golden_diff": "diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py\n--- a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py\n+++ b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py\n@@ -26,16 +26,19 @@\n return []\n \n def select(self, query):\n- if self.ds_type in ('postgres', 'snowflake'):\n- dialect = 'postgres'\n+ if isinstance(query, str):\n+ query_str = query\n else:\n- dialect = 'mysql'\n- render = SqlalchemyRender(dialect)\n- try:\n- query_str = render.get_string(query, with_failback=False)\n- except Exception as e:\n- log.error(f\"Exception during query casting to '{dialect}' dialect. Query: {query}. Error: {e}\")\n- query_str = render.get_string(query, with_failback=True)\n+ if self.ds_type in ('postgres', 'snowflake'):\n+ dialect = 'postgres'\n+ else:\n+ dialect = 'mysql'\n+ render = SqlalchemyRender(dialect)\n+ try:\n+ query_str = render.get_string(query, with_failback=False)\n+ except Exception as e:\n+ log.error(f\"Exception during query casting to '{dialect}' dialect. Query: {query}. Error: {e}\")\n+ query_str = render.get_string(query, with_failback=True)\n \n dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': query_str})\n data = dso.df.to_dict(orient='records')\n", "issue": "selecting from datasource error\nRepoted by David F.\r\n```\r\nuse Postgres_Sample;\r\n\r\nSELECT * FROM data.insurance LIMIT 200;\r\n```\r\nerror:\r\n```\r\nSQL Error [1149] [42000]: 'str' object has no attribute '__name__'\r\n```\n", "before_files": [{"content": "import pandas as pd\nfrom mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender\n\nfrom mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode\nfrom mindsdb.utilities.log import log\n\n\nclass IntegrationDataNode(DataNode):\n type = 'integration'\n\n def __init__(self, integration_name, data_store, ds_type):\n self.integration_name = integration_name\n self.data_store = data_store\n self.ds_type = ds_type\n\n def get_type(self):\n return self.type\n\n def get_tables(self):\n return []\n\n def has_table(self, tableName):\n return True\n\n def get_table_columns(self, tableName):\n return []\n\n def select(self, query):\n if self.ds_type in ('postgres', 'snowflake'):\n dialect = 'postgres'\n else:\n dialect = 'mysql'\n render = SqlalchemyRender(dialect)\n try:\n query_str = render.get_string(query, with_failback=False)\n except Exception as e:\n log.error(f\"Exception during query casting to '{dialect}' dialect. Query: {query}. 
Error: {e}\")\n query_str = render.get_string(query, with_failback=True)\n\n dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': query_str})\n data = dso.df.to_dict(orient='records')\n column_names = list(dso.df.columns)\n\n for column_name in column_names:\n if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]):\n pass_data = dso.df[column_name].dt.to_pydatetime()\n for i, rec in enumerate(data):\n rec[column_name] = pass_data[i].timestamp()\n\n if len(column_names) == 0:\n column_names = ['dataframe_is_empty']\n\n return data, column_names\n", "path": "mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py"}], "after_files": [{"content": "import pandas as pd\nfrom mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender\n\nfrom mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode\nfrom mindsdb.utilities.log import log\n\n\nclass IntegrationDataNode(DataNode):\n type = 'integration'\n\n def __init__(self, integration_name, data_store, ds_type):\n self.integration_name = integration_name\n self.data_store = data_store\n self.ds_type = ds_type\n\n def get_type(self):\n return self.type\n\n def get_tables(self):\n return []\n\n def has_table(self, tableName):\n return True\n\n def get_table_columns(self, tableName):\n return []\n\n def select(self, query):\n if isinstance(query, str):\n query_str = query\n else:\n if self.ds_type in ('postgres', 'snowflake'):\n dialect = 'postgres'\n else:\n dialect = 'mysql'\n render = SqlalchemyRender(dialect)\n try:\n query_str = render.get_string(query, with_failback=False)\n except Exception as e:\n log.error(f\"Exception during query casting to '{dialect}' dialect. Query: {query}. Error: {e}\")\n query_str = render.get_string(query, with_failback=True)\n\n dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': query_str})\n data = dso.df.to_dict(orient='records')\n column_names = list(dso.df.columns)\n\n for column_name in column_names:\n if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]):\n pass_data = dso.df[column_name].dt.to_pydatetime()\n for i, rec in enumerate(data):\n rec[column_name] = pass_data[i].timestamp()\n\n if len(column_names) == 0:\n column_names = ['dataframe_is_empty']\n\n return data, column_names\n", "path": "mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py"}]}
836
381
gh_patches_debug_40307
rasdani/github-patches
git_diff
Project-MONAI__MONAI-1946
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add `strict_shape` option to CheckpointLoader **Is your feature request related to a problem? Please describe.** Currently, we don't support the transfer-learning case that load a checkpoint with same layer names but different shape. We can refer to below code: ```py model_3 = get_model_with_3_classes() state_dict_model_4 = torch.load("best_model_4.pt") @trainer.on(Events.STARTED, model_3, state_dict_model_4) def permissive_model_loader(model, state_dict): this_state_dict = model.state_dict() matched_state_dict = { k: v for k, v in state_dict.items() if k in this_state_dict and v.shape == this_state_dict[k].shape } model.load_state_dict(matched_state_dict, strict=False) trainer.run(...) ``` Thanks. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `monai/handlers/checkpoint_loader.py` Content: ``` 1 # Copyright 2020 - 2021 MONAI Consortium 2 # Licensed under the Apache License, Version 2.0 (the "License"); 3 # you may not use this file except in compliance with the License. 4 # You may obtain a copy of the License at 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # Unless required by applicable law or agreed to in writing, software 7 # distributed under the License is distributed on an "AS IS" BASIS, 8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 # See the License for the specific language governing permissions and 10 # limitations under the License. 11 12 import logging 13 from typing import TYPE_CHECKING, Dict, Optional 14 15 import torch 16 17 from monai.utils import exact_version, optional_import 18 19 Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events") 20 Checkpoint, _ = optional_import("ignite.handlers", "0.4.4", exact_version, "Checkpoint") 21 if TYPE_CHECKING: 22 from ignite.engine import Engine 23 else: 24 Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") 25 26 27 class CheckpointLoader: 28 """ 29 CheckpointLoader acts as an Ignite handler to load checkpoint data from file. 30 It can load variables for network, optimizer, lr_scheduler, etc. 31 If saving checkpoint after `torch.nn.DataParallel`, need to save `model.module` instead 32 as PyTorch recommended and then use this loader to load the model. 33 34 Args: 35 load_path: the file path of checkpoint, it should be a PyTorch `pth` file. 36 load_dict: target objects that load checkpoint to. examples:: 37 38 {'network': net, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler} 39 40 name: identifier of logging.logger to use, if None, defaulting to ``engine.logger``. 41 map_location: when loading the module for distributed training/evaluation, 42 need to provide an appropriate map_location argument to prevent a process 43 to step into others’ devices. If map_location is missing, torch.load will 44 first load the module to CPU and then copy each parameter to where it was 45 saved, which would result in all processes on the same machine using the 46 same set of devices. 47 strict: whether to strictly enforce that the keys in :attr:`state_dict` match the keys 48 returned by this module's :meth:`~torch.nn.Module.state_dict` function. 
Default: ``True`` 49 50 """ 51 52 def __init__( 53 self, 54 load_path: str, 55 load_dict: Dict, 56 name: Optional[str] = None, 57 map_location: Optional[Dict] = None, 58 strict: bool = True, 59 ) -> None: 60 if load_path is None: 61 raise AssertionError("must provide clear path to load checkpoint.") 62 self.load_path = load_path 63 if not (load_dict is not None and len(load_dict) > 0): 64 raise AssertionError("must provide target objects to load.") 65 self.logger = logging.getLogger(name) 66 self.load_dict = load_dict 67 self._name = name 68 self.map_location = map_location 69 self.strict = strict 70 71 def attach(self, engine: Engine) -> None: 72 """ 73 Args: 74 engine: Ignite Engine, it can be a trainer, validator or evaluator. 75 """ 76 if self._name is None: 77 self.logger = engine.logger 78 engine.add_event_handler(Events.STARTED, self) 79 80 def __call__(self, engine: Engine) -> None: 81 """ 82 Args: 83 engine: Ignite Engine, it can be a trainer, validator or evaluator. 84 """ 85 checkpoint = torch.load(self.load_path, map_location=self.map_location) 86 87 # save current max epochs setting in the engine, don't overwrite it if larger than max_epochs in checkpoint 88 prior_max_epochs = engine.state.max_epochs 89 Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint, strict=self.strict) 90 if engine.state.epoch > prior_max_epochs: 91 raise ValueError( 92 f"Epoch count ({engine.state.epoch}) in checkpoint is larger than " 93 f"the `engine.state.max_epochs` ({prior_max_epochs}) of engine. To further train from checkpoint, " 94 "construct trainer with `max_epochs` larger than checkpoint's epoch count. " 95 "To use checkpoint for inference, no need to load state_dict for the engine." 96 ) 97 engine.state.max_epochs = prior_max_epochs 98 99 self.logger.info(f"Restored all variables from {self.load_path}") 100 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/monai/handlers/checkpoint_loader.py b/monai/handlers/checkpoint_loader.py --- a/monai/handlers/checkpoint_loader.py +++ b/monai/handlers/checkpoint_loader.py @@ -13,6 +13,7 @@ from typing import TYPE_CHECKING, Dict, Optional import torch +import torch.nn as nn from monai.utils import exact_version, optional_import @@ -44,8 +45,12 @@ first load the module to CPU and then copy each parameter to where it was saved, which would result in all processes on the same machine using the same set of devices. - strict: whether to strictly enforce that the keys in :attr:`state_dict` match the keys - returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` + strict: whether to strictly enforce that the keys in `state_dict` match the keys + returned by `torch.nn.Module.state_dict` function. default to `True`. + strict_shape: whether to enforce the data shape of the matched layers in the checkpoint, + `if `False`, it will skip the layers that have different data shape with checkpoint content. + This can be useful advanced feature for transfer learning. users should totally + understand which layers will have different shape. default to `True`. """ @@ -56,6 +61,7 @@ name: Optional[str] = None, map_location: Optional[Dict] = None, strict: bool = True, + strict_shape: bool = True, ) -> None: if load_path is None: raise AssertionError("must provide clear path to load checkpoint.") @@ -67,6 +73,7 @@ self._name = name self.map_location = map_location self.strict = strict + self.strict_shape = strict_shape def attach(self, engine: Engine) -> None: """ @@ -84,6 +91,20 @@ """ checkpoint = torch.load(self.load_path, map_location=self.map_location) + if not self.strict_shape: + k, _ = list(self.load_dict.items())[0] + # single object and checkpoint is directly a state_dict + if len(self.load_dict) == 1 and k not in checkpoint: + checkpoint = {k: checkpoint} + + # skip items that don't match data shape + for k, obj in self.load_dict.items(): + if isinstance(obj, (nn.DataParallel, nn.parallel.DistributedDataParallel)): + obj = obj.module + if isinstance(obj, torch.nn.Module): + d = obj.state_dict() + checkpoint[k] = {k: v for k, v in checkpoint[k].items() if k in d and v.shape == d[k].shape} + # save current max epochs setting in the engine, don't overwrite it if larger than max_epochs in checkpoint prior_max_epochs = engine.state.max_epochs Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint, strict=self.strict)
{"golden_diff": "diff --git a/monai/handlers/checkpoint_loader.py b/monai/handlers/checkpoint_loader.py\n--- a/monai/handlers/checkpoint_loader.py\n+++ b/monai/handlers/checkpoint_loader.py\n@@ -13,6 +13,7 @@\n from typing import TYPE_CHECKING, Dict, Optional\n \n import torch\n+import torch.nn as nn\n \n from monai.utils import exact_version, optional_import\n \n@@ -44,8 +45,12 @@\n first load the module to CPU and then copy each parameter to where it was\n saved, which would result in all processes on the same machine using the\n same set of devices.\n- strict: whether to strictly enforce that the keys in :attr:`state_dict` match the keys\n- returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True``\n+ strict: whether to strictly enforce that the keys in `state_dict` match the keys\n+ returned by `torch.nn.Module.state_dict` function. default to `True`.\n+ strict_shape: whether to enforce the data shape of the matched layers in the checkpoint,\n+ `if `False`, it will skip the layers that have different data shape with checkpoint content.\n+ This can be useful advanced feature for transfer learning. users should totally\n+ understand which layers will have different shape. default to `True`.\n \n \"\"\"\n \n@@ -56,6 +61,7 @@\n name: Optional[str] = None,\n map_location: Optional[Dict] = None,\n strict: bool = True,\n+ strict_shape: bool = True,\n ) -> None:\n if load_path is None:\n raise AssertionError(\"must provide clear path to load checkpoint.\")\n@@ -67,6 +73,7 @@\n self._name = name\n self.map_location = map_location\n self.strict = strict\n+ self.strict_shape = strict_shape\n \n def attach(self, engine: Engine) -> None:\n \"\"\"\n@@ -84,6 +91,20 @@\n \"\"\"\n checkpoint = torch.load(self.load_path, map_location=self.map_location)\n \n+ if not self.strict_shape:\n+ k, _ = list(self.load_dict.items())[0]\n+ # single object and checkpoint is directly a state_dict\n+ if len(self.load_dict) == 1 and k not in checkpoint:\n+ checkpoint = {k: checkpoint}\n+\n+ # skip items that don't match data shape\n+ for k, obj in self.load_dict.items():\n+ if isinstance(obj, (nn.DataParallel, nn.parallel.DistributedDataParallel)):\n+ obj = obj.module\n+ if isinstance(obj, torch.nn.Module):\n+ d = obj.state_dict()\n+ checkpoint[k] = {k: v for k, v in checkpoint[k].items() if k in d and v.shape == d[k].shape}\n+\n # save current max epochs setting in the engine, don't overwrite it if larger than max_epochs in checkpoint\n prior_max_epochs = engine.state.max_epochs\n Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint, strict=self.strict)\n", "issue": "Add `strict_shape` option to CheckpointLoader\n**Is your feature request related to a problem? 
Please describe.**\r\nCurrently, we don't support the transfer-learning case that load a checkpoint with same layer names but different shape.\r\nWe can refer to below code:\r\n```py\r\nmodel_3 = get_model_with_3_classes()\r\nstate_dict_model_4 = torch.load(\"best_model_4.pt\")\r\n\r\n@trainer.on(Events.STARTED, model_3, state_dict_model_4)\r\ndef permissive_model_loader(model, state_dict):\r\n this_state_dict = model.state_dict()\r\n matched_state_dict = {\r\n k: v for k, v in state_dict.items()\r\n if k in this_state_dict and v.shape == this_state_dict[k].shape\r\n }\r\n model.load_state_dict(matched_state_dict, strict=False)\r\n\r\ntrainer.run(...)\r\n```\r\n\r\nThanks.\r\n\n", "before_files": [{"content": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom typing import TYPE_CHECKING, Dict, Optional\n\nimport torch\n\nfrom monai.utils import exact_version, optional_import\n\nEvents, _ = optional_import(\"ignite.engine\", \"0.4.4\", exact_version, \"Events\")\nCheckpoint, _ = optional_import(\"ignite.handlers\", \"0.4.4\", exact_version, \"Checkpoint\")\nif TYPE_CHECKING:\n from ignite.engine import Engine\nelse:\n Engine, _ = optional_import(\"ignite.engine\", \"0.4.4\", exact_version, \"Engine\")\n\n\nclass CheckpointLoader:\n \"\"\"\n CheckpointLoader acts as an Ignite handler to load checkpoint data from file.\n It can load variables for network, optimizer, lr_scheduler, etc.\n If saving checkpoint after `torch.nn.DataParallel`, need to save `model.module` instead\n as PyTorch recommended and then use this loader to load the model.\n\n Args:\n load_path: the file path of checkpoint, it should be a PyTorch `pth` file.\n load_dict: target objects that load checkpoint to. examples::\n\n {'network': net, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler}\n\n name: identifier of logging.logger to use, if None, defaulting to ``engine.logger``.\n map_location: when loading the module for distributed training/evaluation,\n need to provide an appropriate map_location argument to prevent a process\n to step into others\u2019 devices. If map_location is missing, torch.load will\n first load the module to CPU and then copy each parameter to where it was\n saved, which would result in all processes on the same machine using the\n same set of devices.\n strict: whether to strictly enforce that the keys in :attr:`state_dict` match the keys\n returned by this module's :meth:`~torch.nn.Module.state_dict` function. 
Default: ``True``\n\n \"\"\"\n\n def __init__(\n self,\n load_path: str,\n load_dict: Dict,\n name: Optional[str] = None,\n map_location: Optional[Dict] = None,\n strict: bool = True,\n ) -> None:\n if load_path is None:\n raise AssertionError(\"must provide clear path to load checkpoint.\")\n self.load_path = load_path\n if not (load_dict is not None and len(load_dict) > 0):\n raise AssertionError(\"must provide target objects to load.\")\n self.logger = logging.getLogger(name)\n self.load_dict = load_dict\n self._name = name\n self.map_location = map_location\n self.strict = strict\n\n def attach(self, engine: Engine) -> None:\n \"\"\"\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n \"\"\"\n if self._name is None:\n self.logger = engine.logger\n engine.add_event_handler(Events.STARTED, self)\n\n def __call__(self, engine: Engine) -> None:\n \"\"\"\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n \"\"\"\n checkpoint = torch.load(self.load_path, map_location=self.map_location)\n\n # save current max epochs setting in the engine, don't overwrite it if larger than max_epochs in checkpoint\n prior_max_epochs = engine.state.max_epochs\n Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint, strict=self.strict)\n if engine.state.epoch > prior_max_epochs:\n raise ValueError(\n f\"Epoch count ({engine.state.epoch}) in checkpoint is larger than \"\n f\"the `engine.state.max_epochs` ({prior_max_epochs}) of engine. To further train from checkpoint, \"\n \"construct trainer with `max_epochs` larger than checkpoint's epoch count. \"\n \"To use checkpoint for inference, no need to load state_dict for the engine.\"\n )\n engine.state.max_epochs = prior_max_epochs\n\n self.logger.info(f\"Restored all variables from {self.load_path}\")\n", "path": "monai/handlers/checkpoint_loader.py"}], "after_files": [{"content": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom typing import TYPE_CHECKING, Dict, Optional\n\nimport torch\nimport torch.nn as nn\n\nfrom monai.utils import exact_version, optional_import\n\nEvents, _ = optional_import(\"ignite.engine\", \"0.4.4\", exact_version, \"Events\")\nCheckpoint, _ = optional_import(\"ignite.handlers\", \"0.4.4\", exact_version, \"Checkpoint\")\nif TYPE_CHECKING:\n from ignite.engine import Engine\nelse:\n Engine, _ = optional_import(\"ignite.engine\", \"0.4.4\", exact_version, \"Engine\")\n\n\nclass CheckpointLoader:\n \"\"\"\n CheckpointLoader acts as an Ignite handler to load checkpoint data from file.\n It can load variables for network, optimizer, lr_scheduler, etc.\n If saving checkpoint after `torch.nn.DataParallel`, need to save `model.module` instead\n as PyTorch recommended and then use this loader to load the model.\n\n Args:\n load_path: the file path of checkpoint, it should be a PyTorch `pth` file.\n load_dict: target objects that load checkpoint to. 
examples::\n\n {'network': net, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler}\n\n name: identifier of logging.logger to use, if None, defaulting to ``engine.logger``.\n map_location: when loading the module for distributed training/evaluation,\n need to provide an appropriate map_location argument to prevent a process\n to step into others\u2019 devices. If map_location is missing, torch.load will\n first load the module to CPU and then copy each parameter to where it was\n saved, which would result in all processes on the same machine using the\n same set of devices.\n strict: whether to strictly enforce that the keys in `state_dict` match the keys\n returned by `torch.nn.Module.state_dict` function. default to `True`.\n strict_shape: whether to enforce the data shape of the matched layers in the checkpoint,\n `if `False`, it will skip the layers that have different data shape with checkpoint content.\n This can be useful advanced feature for transfer learning. users should totally\n understand which layers will have different shape. default to `True`.\n\n \"\"\"\n\n def __init__(\n self,\n load_path: str,\n load_dict: Dict,\n name: Optional[str] = None,\n map_location: Optional[Dict] = None,\n strict: bool = True,\n strict_shape: bool = True,\n ) -> None:\n if load_path is None:\n raise AssertionError(\"must provide clear path to load checkpoint.\")\n self.load_path = load_path\n if not (load_dict is not None and len(load_dict) > 0):\n raise AssertionError(\"must provide target objects to load.\")\n self.logger = logging.getLogger(name)\n self.load_dict = load_dict\n self._name = name\n self.map_location = map_location\n self.strict = strict\n self.strict_shape = strict_shape\n\n def attach(self, engine: Engine) -> None:\n \"\"\"\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n \"\"\"\n if self._name is None:\n self.logger = engine.logger\n engine.add_event_handler(Events.STARTED, self)\n\n def __call__(self, engine: Engine) -> None:\n \"\"\"\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n \"\"\"\n checkpoint = torch.load(self.load_path, map_location=self.map_location)\n\n if not self.strict_shape:\n k, _ = list(self.load_dict.items())[0]\n # single object and checkpoint is directly a state_dict\n if len(self.load_dict) == 1 and k not in checkpoint:\n checkpoint = {k: checkpoint}\n\n # skip items that don't match data shape\n for k, obj in self.load_dict.items():\n if isinstance(obj, (nn.DataParallel, nn.parallel.DistributedDataParallel)):\n obj = obj.module\n if isinstance(obj, torch.nn.Module):\n d = obj.state_dict()\n checkpoint[k] = {k: v for k, v in checkpoint[k].items() if k in d and v.shape == d[k].shape}\n\n # save current max epochs setting in the engine, don't overwrite it if larger than max_epochs in checkpoint\n prior_max_epochs = engine.state.max_epochs\n Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint, strict=self.strict)\n if engine.state.epoch > prior_max_epochs:\n raise ValueError(\n f\"Epoch count ({engine.state.epoch}) in checkpoint is larger than \"\n f\"the `engine.state.max_epochs` ({prior_max_epochs}) of engine. To further train from checkpoint, \"\n \"construct trainer with `max_epochs` larger than checkpoint's epoch count. 
\"\n \"To use checkpoint for inference, no need to load state_dict for the engine.\"\n )\n engine.state.max_epochs = prior_max_epochs\n\n self.logger.info(f\"Restored all variables from {self.load_path}\")\n", "path": "monai/handlers/checkpoint_loader.py"}]}
1,612
680
gh_patches_debug_23199
rasdani/github-patches
git_diff
great-expectations__great_expectations-7252
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Use cleaner solution for non-truncating division in python 2 Prefer `from __future__ import division` to `1.*x/y` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py` Content: ``` 1 from typing import Optional 2 3 import numpy as np 4 5 from great_expectations.core.expectation_configuration import ExpectationConfiguration 6 from great_expectations.execution_engine import PandasExecutionEngine 7 from great_expectations.expectations.expectation import MulticolumnMapExpectation 8 from great_expectations.expectations.metrics.map_metric_provider import ( 9 MulticolumnMapMetricProvider, 10 multicolumn_condition_partial, 11 ) 12 13 14 # This class defines a Metric to support your Expectation. 15 # For most MulticolumnMapExpectations, the main business logic for calculation will live in this class. 16 class MulticolumnValuesNotAllNull(MulticolumnMapMetricProvider): 17 18 # This is the id string that will be used to reference your metric. 19 20 condition_metric_name = "multicolumn_values.not_all_null" 21 22 # These point your metric at the provided keys to facilitate calculation 23 condition_domain_keys = ( 24 "batch_id", 25 "table", 26 "column_list", 27 "row_condition", 28 "condition_parser", 29 "ignore_row_if", 30 ) 31 condition_value_keys = () 32 33 # This method implements the core logic for the PandasExecutionEngine 34 35 @multicolumn_condition_partial(engine=PandasExecutionEngine) 36 def _pandas(cls, column_list, **kwargs): 37 row_wise_cond = column_list.isna().sum(axis=1) < len(column_list) 38 return row_wise_cond 39 40 # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine 41 # @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine) 42 # def _sqlalchemy(cls, column_list, **kwargs): 43 # raise NotImplementedError 44 45 # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine 46 # @multicolumn_condition_partial(engine=SparkDFExecutionEngine) 47 # def _spark(cls, column_list, **kwargs): 48 # raise NotImplementedError 49 50 51 # This class defines the Expectation itself 52 class ExpectMulticolumnValuesNotToBeAllNull(MulticolumnMapExpectation): 53 """Expect the certain set of columns not to be null at the same time.""" 54 55 # These examples will be shown in the public gallery. 56 # They will also be executed as unit tests for your Expectation. 
57 examples = [ 58 { 59 "data": { 60 "no_nulls": [5, 6, 5, 12, -3], 61 "some_nulls": [np.nan, -3, np.nan, np.nan, -9], 62 "one_non_null": [np.nan, 2, np.nan, np.nan, np.nan], 63 "all_nulls": [np.nan, np.nan, np.nan, np.nan, np.nan], 64 }, 65 "tests": [ 66 { 67 "title": "basic_positive_test", 68 "exact_match_out": False, 69 "include_in_gallery": True, 70 "in": {"column_list": ["no_nulls", "some_nulls"]}, 71 "out": { 72 "success": True, 73 }, 74 }, 75 { 76 "title": "basic_positive_test", 77 "exact_match_out": False, 78 "include_in_gallery": True, 79 "in": { 80 "column_list": ["some_nulls", "one_non_null"], 81 "mostly": 0.4, 82 }, 83 "out": { 84 "success": True, 85 }, 86 }, 87 { 88 "title": "basic_negative_test", 89 "exact_match_out": False, 90 "include_in_gallery": True, 91 "in": { 92 "column_list": ["some_nulls", "one_non_null", "all_nulls"], 93 "mostly": 1, 94 }, 95 "out": { 96 "success": False, 97 }, 98 }, 99 ], 100 "test_backends": [ 101 { 102 "backend": "pandas", 103 "dialects": None, 104 }, 105 ], 106 } 107 ] 108 109 # This is the id string of the Metric used by this Expectation. 110 # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. 111 112 map_metric = "multicolumn_values.not_all_null" 113 114 # This is a list of parameter names that can affect whether the Expectation evaluates to True or False 115 success_keys = ( 116 "column_list", 117 "mostly", 118 ) 119 120 # This dictionary contains default values for any parameters that should have default values 121 default_kwarg_values = {} 122 123 def validate_configuration( 124 self, configuration: Optional[ExpectationConfiguration] = None 125 ) -> None: 126 """ 127 Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that 128 necessary configuration arguments have been provided for the validation of the expectation. 129 130 Args: 131 configuration (OPTIONAL[ExpectationConfiguration]): \ 132 An optional Expectation Configuration entry that will be used to configure the expectation 133 Returns: 134 None. Raises InvalidExpectationConfigurationError if the config is not validated successfully 135 """ 136 137 super().validate_configuration(configuration) 138 configuration = configuration or self.configuration 139 140 # # Check other things in configuration.kwargs and raise Exceptions if needed 141 # try: 142 # assert ( 143 # ... 144 # ), "message" 145 # assert ( 146 # ... 147 # ), "message" 148 # except AssertionError as e: 149 # raise InvalidExpectationConfigurationError(str(e)) 150 151 # This object contains metadata for display in the public Gallery 152 153 library_metadata = { 154 "tags": ["null_check"], # Tags for this Expectation in the Gallery 155 "contributors": [ # Github handles for all contributors to this Expectation. 156 "@liyusa", # Don't forget to add your github handle here! 157 ], 158 } 159 160 161 if __name__ == "__main__": 162 163 ExpectMulticolumnValuesNotToBeAllNull().print_diagnostic_checklist() 164 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py b/contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py --- a/contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py +++ b/contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py @@ -34,8 +34,7 @@ @multicolumn_condition_partial(engine=PandasExecutionEngine) def _pandas(cls, column_list, **kwargs): - row_wise_cond = column_list.isna().sum(axis=1) < len(column_list) - return row_wise_cond + return column_list.notna().any(axis=1) # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine # @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine) @@ -118,7 +117,7 @@ ) # This dictionary contains default values for any parameters that should have default values - default_kwarg_values = {} + default_kwarg_values = {"ignore_row_if": "never"} def validate_configuration( self, configuration: Optional[ExpectationConfiguration] = None
{"golden_diff": "diff --git a/contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py b/contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py\n--- a/contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py\n+++ b/contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py\n@@ -34,8 +34,7 @@\n \n @multicolumn_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_list, **kwargs):\n- row_wise_cond = column_list.isna().sum(axis=1) < len(column_list)\n- return row_wise_cond\n+ return column_list.notna().any(axis=1)\n \n # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine\n # @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)\n@@ -118,7 +117,7 @@\n )\n \n # This dictionary contains default values for any parameters that should have default values\n- default_kwarg_values = {}\n+ default_kwarg_values = {\"ignore_row_if\": \"never\"}\n \n def validate_configuration(\n self, configuration: Optional[ExpectationConfiguration] = None\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "from typing import Optional\n\nimport numpy as np\n\nfrom great_expectations.core.expectation_configuration import ExpectationConfiguration\nfrom great_expectations.execution_engine import PandasExecutionEngine\nfrom great_expectations.expectations.expectation import MulticolumnMapExpectation\nfrom great_expectations.expectations.metrics.map_metric_provider import (\n MulticolumnMapMetricProvider,\n multicolumn_condition_partial,\n)\n\n\n# This class defines a Metric to support your Expectation.\n# For most MulticolumnMapExpectations, the main business logic for calculation will live in this class.\nclass MulticolumnValuesNotAllNull(MulticolumnMapMetricProvider):\n\n # This is the id string that will be used to reference your metric.\n\n condition_metric_name = \"multicolumn_values.not_all_null\"\n\n # These point your metric at the provided keys to facilitate calculation\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_list\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n condition_value_keys = ()\n\n # This method implements the core logic for the PandasExecutionEngine\n\n @multicolumn_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_list, **kwargs):\n row_wise_cond = column_list.isna().sum(axis=1) < len(column_list)\n return row_wise_cond\n\n # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine\n # @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)\n # def _sqlalchemy(cls, column_list, **kwargs):\n # raise NotImplementedError\n\n # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine\n # @multicolumn_condition_partial(engine=SparkDFExecutionEngine)\n # def _spark(cls, column_list, **kwargs):\n # raise NotImplementedError\n\n\n# This class defines the Expectation itself\nclass ExpectMulticolumnValuesNotToBeAllNull(MulticolumnMapExpectation):\n \"\"\"Expect the certain set of columns not to be null at the same time.\"\"\"\n\n # These examples will be shown in the public gallery.\n # They will also be executed as unit 
tests for your Expectation.\n examples = [\n {\n \"data\": {\n \"no_nulls\": [5, 6, 5, 12, -3],\n \"some_nulls\": [np.nan, -3, np.nan, np.nan, -9],\n \"one_non_null\": [np.nan, 2, np.nan, np.nan, np.nan],\n \"all_nulls\": [np.nan, np.nan, np.nan, np.nan, np.nan],\n },\n \"tests\": [\n {\n \"title\": \"basic_positive_test\",\n \"exact_match_out\": False,\n \"include_in_gallery\": True,\n \"in\": {\"column_list\": [\"no_nulls\", \"some_nulls\"]},\n \"out\": {\n \"success\": True,\n },\n },\n {\n \"title\": \"basic_positive_test\",\n \"exact_match_out\": False,\n \"include_in_gallery\": True,\n \"in\": {\n \"column_list\": [\"some_nulls\", \"one_non_null\"],\n \"mostly\": 0.4,\n },\n \"out\": {\n \"success\": True,\n },\n },\n {\n \"title\": \"basic_negative_test\",\n \"exact_match_out\": False,\n \"include_in_gallery\": True,\n \"in\": {\n \"column_list\": [\"some_nulls\", \"one_non_null\", \"all_nulls\"],\n \"mostly\": 1,\n },\n \"out\": {\n \"success\": False,\n },\n },\n ],\n \"test_backends\": [\n {\n \"backend\": \"pandas\",\n \"dialects\": None,\n },\n ],\n }\n ]\n\n # This is the id string of the Metric used by this Expectation.\n # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.\n\n map_metric = \"multicolumn_values.not_all_null\"\n\n # This is a list of parameter names that can affect whether the Expectation evaluates to True or False\n success_keys = (\n \"column_list\",\n \"mostly\",\n )\n\n # This dictionary contains default values for any parameters that should have default values\n default_kwarg_values = {}\n\n def validate_configuration(\n self, configuration: Optional[ExpectationConfiguration] = None\n ) -> None:\n \"\"\"\n Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that\n necessary configuration arguments have been provided for the validation of the expectation.\n\n Args:\n configuration (OPTIONAL[ExpectationConfiguration]): \\\n An optional Expectation Configuration entry that will be used to configure the expectation\n Returns:\n None. 
Raises InvalidExpectationConfigurationError if the config is not validated successfully\n \"\"\"\n\n super().validate_configuration(configuration)\n configuration = configuration or self.configuration\n\n # # Check other things in configuration.kwargs and raise Exceptions if needed\n # try:\n # assert (\n # ...\n # ), \"message\"\n # assert (\n # ...\n # ), \"message\"\n # except AssertionError as e:\n # raise InvalidExpectationConfigurationError(str(e))\n\n # This object contains metadata for display in the public Gallery\n\n library_metadata = {\n \"tags\": [\"null_check\"], # Tags for this Expectation in the Gallery\n \"contributors\": [ # Github handles for all contributors to this Expectation.\n \"@liyusa\", # Don't forget to add your github handle here!\n ],\n }\n\n\nif __name__ == \"__main__\":\n\n ExpectMulticolumnValuesNotToBeAllNull().print_diagnostic_checklist()\n", "path": "contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py"}], "after_files": [{"content": "from typing import Optional\n\nimport numpy as np\n\nfrom great_expectations.core.expectation_configuration import ExpectationConfiguration\nfrom great_expectations.execution_engine import PandasExecutionEngine\nfrom great_expectations.expectations.expectation import MulticolumnMapExpectation\nfrom great_expectations.expectations.metrics.map_metric_provider import (\n MulticolumnMapMetricProvider,\n multicolumn_condition_partial,\n)\n\n\n# This class defines a Metric to support your Expectation.\n# For most MulticolumnMapExpectations, the main business logic for calculation will live in this class.\nclass MulticolumnValuesNotAllNull(MulticolumnMapMetricProvider):\n\n # This is the id string that will be used to reference your metric.\n\n condition_metric_name = \"multicolumn_values.not_all_null\"\n\n # These point your metric at the provided keys to facilitate calculation\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_list\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n condition_value_keys = ()\n\n # This method implements the core logic for the PandasExecutionEngine\n\n @multicolumn_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_list, **kwargs):\n return column_list.notna().any(axis=1)\n\n # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine\n # @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)\n # def _sqlalchemy(cls, column_list, **kwargs):\n # raise NotImplementedError\n\n # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine\n # @multicolumn_condition_partial(engine=SparkDFExecutionEngine)\n # def _spark(cls, column_list, **kwargs):\n # raise NotImplementedError\n\n\n# This class defines the Expectation itself\nclass ExpectMulticolumnValuesNotToBeAllNull(MulticolumnMapExpectation):\n \"\"\"Expect the certain set of columns not to be null at the same time.\"\"\"\n\n # These examples will be shown in the public gallery.\n # They will also be executed as unit tests for your Expectation.\n examples = [\n {\n \"data\": {\n \"no_nulls\": [5, 6, 5, 12, -3],\n \"some_nulls\": [np.nan, -3, np.nan, np.nan, -9],\n \"one_non_null\": [np.nan, 2, np.nan, np.nan, np.nan],\n \"all_nulls\": [np.nan, np.nan, np.nan, np.nan, np.nan],\n },\n \"tests\": [\n {\n \"title\": \"basic_positive_test\",\n \"exact_match_out\": False,\n \"include_in_gallery\": True,\n \"in\": {\"column_list\": 
[\"no_nulls\", \"some_nulls\"]},\n \"out\": {\n \"success\": True,\n },\n },\n {\n \"title\": \"basic_positive_test\",\n \"exact_match_out\": False,\n \"include_in_gallery\": True,\n \"in\": {\n \"column_list\": [\"some_nulls\", \"one_non_null\"],\n \"mostly\": 0.4,\n },\n \"out\": {\n \"success\": True,\n },\n },\n {\n \"title\": \"basic_negative_test\",\n \"exact_match_out\": False,\n \"include_in_gallery\": True,\n \"in\": {\n \"column_list\": [\"some_nulls\", \"one_non_null\", \"all_nulls\"],\n \"mostly\": 1,\n },\n \"out\": {\n \"success\": False,\n },\n },\n ],\n \"test_backends\": [\n {\n \"backend\": \"pandas\",\n \"dialects\": None,\n },\n ],\n }\n ]\n\n # This is the id string of the Metric used by this Expectation.\n # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.\n\n map_metric = \"multicolumn_values.not_all_null\"\n\n # This is a list of parameter names that can affect whether the Expectation evaluates to True or False\n success_keys = (\n \"column_list\",\n \"mostly\",\n )\n\n # This dictionary contains default values for any parameters that should have default values\n default_kwarg_values = {\"ignore_row_if\": \"never\"}\n\n def validate_configuration(\n self, configuration: Optional[ExpectationConfiguration] = None\n ) -> None:\n \"\"\"\n Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that\n necessary configuration arguments have been provided for the validation of the expectation.\n\n Args:\n configuration (OPTIONAL[ExpectationConfiguration]): \\\n An optional Expectation Configuration entry that will be used to configure the expectation\n Returns:\n None. Raises InvalidExpectationConfigurationError if the config is not validated successfully\n \"\"\"\n\n super().validate_configuration(configuration)\n configuration = configuration or self.configuration\n\n # # Check other things in configuration.kwargs and raise Exceptions if needed\n # try:\n # assert (\n # ...\n # ), \"message\"\n # assert (\n # ...\n # ), \"message\"\n # except AssertionError as e:\n # raise InvalidExpectationConfigurationError(str(e))\n\n # This object contains metadata for display in the public Gallery\n\n library_metadata = {\n \"tags\": [\"null_check\"], # Tags for this Expectation in the Gallery\n \"contributors\": [ # Github handles for all contributors to this Expectation.\n \"@liyusa\", # Don't forget to add your github handle here!\n ],\n }\n\n\nif __name__ == \"__main__\":\n\n ExpectMulticolumnValuesNotToBeAllNull().print_diagnostic_checklist()\n", "path": "contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py"}]}
1,957
306
gh_patches_debug_22178
rasdani/github-patches
git_diff
cookiecutter__cookiecutter-729
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Empty hook file causes cryptic error If you have a pre_gen_project.sh or a post_gen_project.sh file with no data in it, cookiecutter fails with an unhelpful traceback. ``` Traceback (most recent call last): File "/usr/local/bin/cookiecutter", line 11, in <module> sys.exit(main()) File "/usr/local/lib/python2.7/site-packages/click/core.py", line 716, in __call__ return self.main(*args, **kwargs) File "/usr/local/lib/python2.7/site-packages/click/core.py", line 696, in main rv = self.invoke(ctx) File "/usr/local/lib/python2.7/site-packages/click/core.py", line 889, in invoke return ctx.invoke(self.callback, **ctx.params) File "/usr/local/lib/python2.7/site-packages/click/core.py", line 534, in invoke return callback(*args, **kwargs) File "/usr/local/lib/python2.7/site-packages/cookiecutter/cli.py", line 100, in main config_file=user_config File "/usr/local/lib/python2.7/site-packages/cookiecutter/main.py", line 140, in cookiecutter output_dir=output_dir File "/usr/local/lib/python2.7/site-packages/cookiecutter/generate.py", line 273, in generate_files _run_hook_from_repo_dir(repo_dir, 'pre_gen_project', project_dir, context) File "/usr/local/lib/python2.7/site-packages/cookiecutter/generate.py", line 232, in _run_hook_from_repo_dir run_hook(hook_name, project_dir, context) File "/usr/local/lib/python2.7/site-packages/cookiecutter/hooks.py", line 116, in run_hook run_script_with_context(script, project_dir, context) File "/usr/local/lib/python2.7/site-packages/cookiecutter/hooks.py", line 101, in run_script_with_context run_script(temp.name, cwd) File "/usr/local/lib/python2.7/site-packages/cookiecutter/hooks.py", line 73, in run_script cwd=cwd File "/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/subprocess.py", line 656, in __init__ _cleanup() File "/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/subprocess.py", line 1335, in _execute_child raise child_exception OSError: [Errno 8] Exec format error ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cookiecutter/hooks.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 """ 5 cookiecutter.hooks 6 ------------------ 7 8 Functions for discovering and executing various cookiecutter hooks. 9 """ 10 11 import io 12 import logging 13 import os 14 import subprocess 15 import sys 16 import tempfile 17 18 from jinja2 import Template 19 20 from cookiecutter import utils 21 from .exceptions import FailedHookException 22 23 24 _HOOKS = [ 25 'pre_gen_project', 26 'post_gen_project', 27 # TODO: other hooks should be listed here 28 ] 29 EXIT_SUCCESS = 0 30 31 32 def find_hooks(): 33 """ 34 Must be called with the project template as the current working directory. 35 Returns a dict of all hook scripts provided. 36 Dict's key will be the hook/script's name, without extension, while 37 values will be the absolute path to the script. 38 Missing scripts will not be included in the returned dict. 
39 """ 40 hooks_dir = 'hooks' 41 r = {} 42 logging.debug('hooks_dir is {0}'.format(hooks_dir)) 43 if not os.path.isdir(hooks_dir): 44 logging.debug('No hooks/ dir in template_dir') 45 return r 46 for f in os.listdir(hooks_dir): 47 basename = os.path.splitext(os.path.basename(f))[0] 48 if basename in _HOOKS: 49 r[basename] = os.path.abspath(os.path.join(hooks_dir, f)) 50 return r 51 52 53 def run_script(script_path, cwd='.'): 54 """ 55 Executes a script from a working directory. 56 57 :param script_path: Absolute path to the script to run. 58 :param cwd: The directory to run the script from. 59 """ 60 run_thru_shell = sys.platform.startswith('win') 61 if script_path.endswith('.py'): 62 script_command = [sys.executable, script_path] 63 else: 64 script_command = [script_path] 65 66 utils.make_executable(script_path) 67 68 proc = subprocess.Popen( 69 script_command, 70 shell=run_thru_shell, 71 cwd=cwd 72 ) 73 exit_status = proc.wait() 74 if exit_status != EXIT_SUCCESS: 75 raise FailedHookException( 76 "Hook script failed (exit status: %d)" % exit_status) 77 78 79 def run_script_with_context(script_path, cwd, context): 80 """ 81 Executes a script after rendering with it Jinja. 82 83 :param script_path: Absolute path to the script to run. 84 :param cwd: The directory to run the script from. 85 :param context: Cookiecutter project template context. 86 """ 87 _, extension = os.path.splitext(script_path) 88 89 contents = io.open(script_path, 'r', encoding='utf-8').read() 90 91 with tempfile.NamedTemporaryFile( 92 delete=False, 93 mode='wb', 94 suffix=extension 95 ) as temp: 96 output = Template(contents).render(**context) 97 temp.write(output.encode('utf-8')) 98 99 run_script(temp.name, cwd) 100 101 102 def run_hook(hook_name, project_dir, context): 103 """ 104 Try to find and execute a hook from the specified project directory. 105 106 :param hook_name: The hook to execute. 107 :param project_dir: The directory to execute the script from. 108 :param context: Cookiecutter project context. 109 """ 110 script = find_hooks().get(hook_name) 111 if script is None: 112 logging.debug('No hooks found') 113 return 114 run_script_with_context(script, project_dir, context) 115 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cookiecutter/hooks.py b/cookiecutter/hooks.py --- a/cookiecutter/hooks.py +++ b/cookiecutter/hooks.py @@ -8,6 +8,7 @@ Functions for discovering and executing various cookiecutter hooks. """ +import errno import io import logging import os @@ -65,15 +66,23 @@ utils.make_executable(script_path) - proc = subprocess.Popen( - script_command, - shell=run_thru_shell, - cwd=cwd - ) - exit_status = proc.wait() - if exit_status != EXIT_SUCCESS: + try: + proc = subprocess.Popen( + script_command, + shell=run_thru_shell, + cwd=cwd + ) + exit_status = proc.wait() + if exit_status != EXIT_SUCCESS: + raise FailedHookException( + "Hook script failed (exit status: %d)" % exit_status) + except OSError as oe: + if oe.errno == errno.ENOEXEC: + raise FailedHookException( + "Hook script failed, might be an " + "empty file or missing a shebang") raise FailedHookException( - "Hook script failed (exit status: %d)" % exit_status) + "Hook script failed (error: %s)" % oe) def run_script_with_context(script_path, cwd, context):
{"golden_diff": "diff --git a/cookiecutter/hooks.py b/cookiecutter/hooks.py\n--- a/cookiecutter/hooks.py\n+++ b/cookiecutter/hooks.py\n@@ -8,6 +8,7 @@\n Functions for discovering and executing various cookiecutter hooks.\n \"\"\"\n \n+import errno\n import io\n import logging\n import os\n@@ -65,15 +66,23 @@\n \n utils.make_executable(script_path)\n \n- proc = subprocess.Popen(\n- script_command,\n- shell=run_thru_shell,\n- cwd=cwd\n- )\n- exit_status = proc.wait()\n- if exit_status != EXIT_SUCCESS:\n+ try:\n+ proc = subprocess.Popen(\n+ script_command,\n+ shell=run_thru_shell,\n+ cwd=cwd\n+ )\n+ exit_status = proc.wait()\n+ if exit_status != EXIT_SUCCESS:\n+ raise FailedHookException(\n+ \"Hook script failed (exit status: %d)\" % exit_status)\n+ except OSError as oe:\n+ if oe.errno == errno.ENOEXEC:\n+ raise FailedHookException(\n+ \"Hook script failed, might be an \"\n+ \"empty file or missing a shebang\")\n raise FailedHookException(\n- \"Hook script failed (exit status: %d)\" % exit_status)\n+ \"Hook script failed (error: %s)\" % oe)\n \n \n def run_script_with_context(script_path, cwd, context):\n", "issue": "Empty hook file causes cryptic error\nIf you have a pre_gen_project.sh or a post_gen_project.sh file with no data in it, cookiecutter fails with an unhelpful traceback.\n\n```\nTraceback (most recent call last):\n File \"/usr/local/bin/cookiecutter\", line 11, in <module>\n sys.exit(main())\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 716, in __call__\n return self.main(*args, **kwargs)\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 696, in main\n rv = self.invoke(ctx)\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 889, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 534, in invoke\n return callback(*args, **kwargs)\n File \"/usr/local/lib/python2.7/site-packages/cookiecutter/cli.py\", line 100, in main\n config_file=user_config\n File \"/usr/local/lib/python2.7/site-packages/cookiecutter/main.py\", line 140, in cookiecutter\n output_dir=output_dir\n File \"/usr/local/lib/python2.7/site-packages/cookiecutter/generate.py\", line 273, in generate_files\n _run_hook_from_repo_dir(repo_dir, 'pre_gen_project', project_dir, context)\n File \"/usr/local/lib/python2.7/site-packages/cookiecutter/generate.py\", line 232, in _run_hook_from_repo_dir\n run_hook(hook_name, project_dir, context)\n File \"/usr/local/lib/python2.7/site-packages/cookiecutter/hooks.py\", line 116, in run_hook\n run_script_with_context(script, project_dir, context)\n File \"/usr/local/lib/python2.7/site-packages/cookiecutter/hooks.py\", line 101, in run_script_with_context\n run_script(temp.name, cwd)\n File \"/usr/local/lib/python2.7/site-packages/cookiecutter/hooks.py\", line 73, in run_script\n cwd=cwd\n File \"/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/subprocess.py\", line 656, in __init__\n _cleanup()\n File \"/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/subprocess.py\", line 1335, in _execute_child\n raise child_exception\nOSError: [Errno 8] Exec format error\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.hooks\n------------------\n\nFunctions for discovering and executing various cookiecutter hooks.\n\"\"\"\n\nimport io\nimport logging\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nfrom jinja2 
import Template\n\nfrom cookiecutter import utils\nfrom .exceptions import FailedHookException\n\n\n_HOOKS = [\n 'pre_gen_project',\n 'post_gen_project',\n # TODO: other hooks should be listed here\n]\nEXIT_SUCCESS = 0\n\n\ndef find_hooks():\n \"\"\"\n Must be called with the project template as the current working directory.\n Returns a dict of all hook scripts provided.\n Dict's key will be the hook/script's name, without extension, while\n values will be the absolute path to the script.\n Missing scripts will not be included in the returned dict.\n \"\"\"\n hooks_dir = 'hooks'\n r = {}\n logging.debug('hooks_dir is {0}'.format(hooks_dir))\n if not os.path.isdir(hooks_dir):\n logging.debug('No hooks/ dir in template_dir')\n return r\n for f in os.listdir(hooks_dir):\n basename = os.path.splitext(os.path.basename(f))[0]\n if basename in _HOOKS:\n r[basename] = os.path.abspath(os.path.join(hooks_dir, f))\n return r\n\n\ndef run_script(script_path, cwd='.'):\n \"\"\"\n Executes a script from a working directory.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n \"\"\"\n run_thru_shell = sys.platform.startswith('win')\n if script_path.endswith('.py'):\n script_command = [sys.executable, script_path]\n else:\n script_command = [script_path]\n\n utils.make_executable(script_path)\n\n proc = subprocess.Popen(\n script_command,\n shell=run_thru_shell,\n cwd=cwd\n )\n exit_status = proc.wait()\n if exit_status != EXIT_SUCCESS:\n raise FailedHookException(\n \"Hook script failed (exit status: %d)\" % exit_status)\n\n\ndef run_script_with_context(script_path, cwd, context):\n \"\"\"\n Executes a script after rendering with it Jinja.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n :param context: Cookiecutter project template context.\n \"\"\"\n _, extension = os.path.splitext(script_path)\n\n contents = io.open(script_path, 'r', encoding='utf-8').read()\n\n with tempfile.NamedTemporaryFile(\n delete=False,\n mode='wb',\n suffix=extension\n ) as temp:\n output = Template(contents).render(**context)\n temp.write(output.encode('utf-8'))\n\n run_script(temp.name, cwd)\n\n\ndef run_hook(hook_name, project_dir, context):\n \"\"\"\n Try to find and execute a hook from the specified project directory.\n\n :param hook_name: The hook to execute.\n :param project_dir: The directory to execute the script from.\n :param context: Cookiecutter project context.\n \"\"\"\n script = find_hooks().get(hook_name)\n if script is None:\n logging.debug('No hooks found')\n return\n run_script_with_context(script, project_dir, context)\n", "path": "cookiecutter/hooks.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.hooks\n------------------\n\nFunctions for discovering and executing various cookiecutter hooks.\n\"\"\"\n\nimport errno\nimport io\nimport logging\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nfrom jinja2 import Template\n\nfrom cookiecutter import utils\nfrom .exceptions import FailedHookException\n\n\n_HOOKS = [\n 'pre_gen_project',\n 'post_gen_project',\n # TODO: other hooks should be listed here\n]\nEXIT_SUCCESS = 0\n\n\ndef find_hooks():\n \"\"\"\n Must be called with the project template as the current working directory.\n Returns a dict of all hook scripts provided.\n Dict's key will be the hook/script's name, without extension, while\n values will be the absolute path to the script.\n Missing scripts 
will not be included in the returned dict.\n \"\"\"\n hooks_dir = 'hooks'\n r = {}\n logging.debug('hooks_dir is {0}'.format(hooks_dir))\n if not os.path.isdir(hooks_dir):\n logging.debug('No hooks/ dir in template_dir')\n return r\n for f in os.listdir(hooks_dir):\n basename = os.path.splitext(os.path.basename(f))[0]\n if basename in _HOOKS:\n r[basename] = os.path.abspath(os.path.join(hooks_dir, f))\n return r\n\n\ndef run_script(script_path, cwd='.'):\n \"\"\"\n Executes a script from a working directory.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n \"\"\"\n run_thru_shell = sys.platform.startswith('win')\n if script_path.endswith('.py'):\n script_command = [sys.executable, script_path]\n else:\n script_command = [script_path]\n\n utils.make_executable(script_path)\n\n try:\n proc = subprocess.Popen(\n script_command,\n shell=run_thru_shell,\n cwd=cwd\n )\n exit_status = proc.wait()\n if exit_status != EXIT_SUCCESS:\n raise FailedHookException(\n \"Hook script failed (exit status: %d)\" % exit_status)\n except OSError as oe:\n if oe.errno == errno.ENOEXEC:\n raise FailedHookException(\n \"Hook script failed, might be an \"\n \"empty file or missing a shebang\")\n raise FailedHookException(\n \"Hook script failed (error: %s)\" % oe)\n\n\ndef run_script_with_context(script_path, cwd, context):\n \"\"\"\n Executes a script after rendering with it Jinja.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n :param context: Cookiecutter project template context.\n \"\"\"\n _, extension = os.path.splitext(script_path)\n\n contents = io.open(script_path, 'r', encoding='utf-8').read()\n\n with tempfile.NamedTemporaryFile(\n delete=False,\n mode='wb',\n suffix=extension\n ) as temp:\n output = Template(contents).render(**context)\n temp.write(output.encode('utf-8'))\n\n run_script(temp.name, cwd)\n\n\ndef run_hook(hook_name, project_dir, context):\n \"\"\"\n Try to find and execute a hook from the specified project directory.\n\n :param hook_name: The hook to execute.\n :param project_dir: The directory to execute the script from.\n :param context: Cookiecutter project context.\n \"\"\"\n script = find_hooks().get(hook_name)\n if script is None:\n logging.debug('No hooks found')\n return\n run_script_with_context(script, project_dir, context)\n", "path": "cookiecutter/hooks.py"}]}
1,842
317
gh_patches_debug_11771
rasdani/github-patches
git_diff
google__timesketch-268
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Wrong app context for CSV task We need to run the CSV importer task in the correct context. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `timesketch/lib/tasks.py` Content: ``` 1 # Copyright 2015 Google Inc. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """Celery task for processing Plaso storage files.""" 15 16 import os 17 import logging 18 import sys 19 20 from flask import current_app 21 # We currently don't have plaso in our Travis setup. This is a workaround 22 # for that until we fix the Travis environment. 23 # TODO: Add Plaso to our Travis environment we are running our tests in. 24 try: 25 from plaso.frontend import psort 26 except ImportError: 27 pass 28 29 from timesketch import create_celery_app 30 from timesketch.lib.datastores.elastic import ElasticsearchDataStore 31 from timesketch.lib.utils import read_and_validate_csv 32 from timesketch.models import db_session 33 from timesketch.models.sketch import SearchIndex 34 35 celery = create_celery_app() 36 37 38 def get_data_location(): 39 """Path to the plaso data directory. 40 41 Returns: 42 The path to where the plaso data directory is or None if not existing. 43 """ 44 data_location = current_app.config.get(u'PLASO_DATA_LOCATION', None) 45 if not data_location: 46 data_location = os.path.join(sys.prefix, u'share', u'plaso') 47 if not os.path.exists(data_location): 48 data_location = None 49 return data_location 50 51 52 @celery.task(track_started=True) 53 def run_plaso(source_file_path, timeline_name, index_name, username=None): 54 """Create a Celery task for processing Plaso storage file. 55 56 Args: 57 source_file_path: Path to plaso storage file. 58 timeline_name: Name of the Timesketch timeline. 59 index_name: Name of the datastore index. 60 username: Username of the user who will own the timeline. 61 62 Returns: 63 Dictionary with count of processed events. 64 """ 65 plaso_data_location = get_data_location() 66 flush_interval = 1000 # events to queue before bulk index 67 doc_type = u'plaso_event' # Document type for Elasticsearch 68 69 # Use Plaso psort frontend tool. 70 frontend = psort.PsortFrontend() 71 frontend.SetDataLocation(plaso_data_location) 72 storage_reader = frontend.CreateStorageReader(source_file_path) 73 74 # Setup the Timesketch output module. 75 output_module = frontend.CreateOutputModule(u'timesketch') 76 output_module.SetIndexName(index_name) 77 output_module.SetTimelineName(timeline_name) 78 output_module.SetFlushInterval(flush_interval) 79 output_module.SetDocType(doc_type) 80 if username: 81 output_module.SetUserName(username) 82 83 # Start process the Plaso storage file. 
84 counter = frontend.ExportEvents(storage_reader, output_module) 85 86 return dict(counter) 87 88 89 @celery.task(track_started=True) 90 def run_csv(source_file_path, timeline_name, index_name, username=None): 91 """Create a Celery task for processing a CSV file. 92 93 Args: 94 source_file_path: Path to CSV file. 95 timeline_name: Name of the Timesketch timeline. 96 index_name: Name of the datastore index. 97 98 Returns: 99 Dictionary with count of processed events. 100 """ 101 flush_interval = 1000 # events to queue before bulk index 102 event_type = u'generic_event' # Document type for Elasticsearch 103 104 # Log information to Celery 105 logging.info(u'Index name: %s', index_name) 106 logging.info(u'Timeline name: %s', timeline_name) 107 logging.info(u'Flush interval: %d', flush_interval) 108 logging.info(u'Document type: %s', event_type) 109 logging.info(u'Owner: %s', username) 110 111 es = ElasticsearchDataStore( 112 host=current_app.config[u'ELASTIC_HOST'], 113 port=current_app.config[u'ELASTIC_PORT']) 114 115 es.create_index(index_name=index_name, doc_type=event_type) 116 for event in read_and_validate_csv(source_file_path): 117 es.import_event( 118 flush_interval, index_name, event_type, event) 119 120 # Import the remaining events 121 total_events = es.import_event(flush_interval, index_name, event_type) 122 123 # We are done so let's remove the processing status flag 124 search_index = SearchIndex.query.filter_by(index_name=index_name).first() 125 search_index.status.remove(search_index.status[0]) 126 db_session.add(search_index) 127 db_session.commit() 128 129 return {u'Events processed': total_events} 130 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/timesketch/lib/tasks.py b/timesketch/lib/tasks.py --- a/timesketch/lib/tasks.py +++ b/timesketch/lib/tasks.py @@ -121,9 +121,10 @@ total_events = es.import_event(flush_interval, index_name, event_type) # We are done so let's remove the processing status flag - search_index = SearchIndex.query.filter_by(index_name=index_name).first() - search_index.status.remove(search_index.status[0]) - db_session.add(search_index) - db_session.commit() + with celery.app.app_context(): + search_index = SearchIndex.query.filter_by(index_name=index_name).first() + search_index.status.remove(search_index.status[0]) + db_session.add(search_index) + db_session.commit() return {u'Events processed': total_events}
{"golden_diff": "diff --git a/timesketch/lib/tasks.py b/timesketch/lib/tasks.py\n--- a/timesketch/lib/tasks.py\n+++ b/timesketch/lib/tasks.py\n@@ -121,9 +121,10 @@\n total_events = es.import_event(flush_interval, index_name, event_type)\n \n # We are done so let's remove the processing status flag\n- search_index = SearchIndex.query.filter_by(index_name=index_name).first()\n- search_index.status.remove(search_index.status[0])\n- db_session.add(search_index)\n- db_session.commit()\n+ with celery.app.app_context():\n+ search_index = SearchIndex.query.filter_by(index_name=index_name).first()\n+ search_index.status.remove(search_index.status[0])\n+ db_session.add(search_index)\n+ db_session.commit()\n \n return {u'Events processed': total_events}\n", "issue": "Wrong app context for CSV task\nWe need to run the CSV importer task in the correct context.\n", "before_files": [{"content": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Celery task for processing Plaso storage files.\"\"\"\n\nimport os\nimport logging\nimport sys\n\nfrom flask import current_app\n# We currently don't have plaso in our Travis setup. This is a workaround\n# for that until we fix the Travis environment.\n# TODO: Add Plaso to our Travis environment we are running our tests in.\ntry:\n from plaso.frontend import psort\nexcept ImportError:\n pass\n\nfrom timesketch import create_celery_app\nfrom timesketch.lib.datastores.elastic import ElasticsearchDataStore\nfrom timesketch.lib.utils import read_and_validate_csv\nfrom timesketch.models import db_session\nfrom timesketch.models.sketch import SearchIndex\n\ncelery = create_celery_app()\n\n\ndef get_data_location():\n \"\"\"Path to the plaso data directory.\n\n Returns:\n The path to where the plaso data directory is or None if not existing.\n \"\"\"\n data_location = current_app.config.get(u'PLASO_DATA_LOCATION', None)\n if not data_location:\n data_location = os.path.join(sys.prefix, u'share', u'plaso')\n if not os.path.exists(data_location):\n data_location = None\n return data_location\n\n\n@celery.task(track_started=True)\ndef run_plaso(source_file_path, timeline_name, index_name, username=None):\n \"\"\"Create a Celery task for processing Plaso storage file.\n\n Args:\n source_file_path: Path to plaso storage file.\n timeline_name: Name of the Timesketch timeline.\n index_name: Name of the datastore index.\n username: Username of the user who will own the timeline.\n\n Returns:\n Dictionary with count of processed events.\n \"\"\"\n plaso_data_location = get_data_location()\n flush_interval = 1000 # events to queue before bulk index\n doc_type = u'plaso_event' # Document type for Elasticsearch\n\n # Use Plaso psort frontend tool.\n frontend = psort.PsortFrontend()\n frontend.SetDataLocation(plaso_data_location)\n storage_reader = frontend.CreateStorageReader(source_file_path)\n\n # Setup the Timesketch output module.\n output_module = frontend.CreateOutputModule(u'timesketch')\n 
output_module.SetIndexName(index_name)\n output_module.SetTimelineName(timeline_name)\n output_module.SetFlushInterval(flush_interval)\n output_module.SetDocType(doc_type)\n if username:\n output_module.SetUserName(username)\n\n # Start process the Plaso storage file.\n counter = frontend.ExportEvents(storage_reader, output_module)\n\n return dict(counter)\n\n\n@celery.task(track_started=True)\ndef run_csv(source_file_path, timeline_name, index_name, username=None):\n \"\"\"Create a Celery task for processing a CSV file.\n\n Args:\n source_file_path: Path to CSV file.\n timeline_name: Name of the Timesketch timeline.\n index_name: Name of the datastore index.\n\n Returns:\n Dictionary with count of processed events.\n \"\"\"\n flush_interval = 1000 # events to queue before bulk index\n event_type = u'generic_event' # Document type for Elasticsearch\n\n # Log information to Celery\n logging.info(u'Index name: %s', index_name)\n logging.info(u'Timeline name: %s', timeline_name)\n logging.info(u'Flush interval: %d', flush_interval)\n logging.info(u'Document type: %s', event_type)\n logging.info(u'Owner: %s', username)\n\n es = ElasticsearchDataStore(\n host=current_app.config[u'ELASTIC_HOST'],\n port=current_app.config[u'ELASTIC_PORT'])\n\n es.create_index(index_name=index_name, doc_type=event_type)\n for event in read_and_validate_csv(source_file_path):\n es.import_event(\n flush_interval, index_name, event_type, event)\n\n # Import the remaining events\n total_events = es.import_event(flush_interval, index_name, event_type)\n\n # We are done so let's remove the processing status flag\n search_index = SearchIndex.query.filter_by(index_name=index_name).first()\n search_index.status.remove(search_index.status[0])\n db_session.add(search_index)\n db_session.commit()\n\n return {u'Events processed': total_events}\n", "path": "timesketch/lib/tasks.py"}], "after_files": [{"content": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Celery task for processing Plaso storage files.\"\"\"\n\nimport os\nimport logging\nimport sys\n\nfrom flask import current_app\n# We currently don't have plaso in our Travis setup. 
This is a workaround\n# for that until we fix the Travis environment.\n# TODO: Add Plaso to our Travis environment we are running our tests in.\ntry:\n from plaso.frontend import psort\nexcept ImportError:\n pass\n\nfrom timesketch import create_celery_app\nfrom timesketch.lib.datastores.elastic import ElasticsearchDataStore\nfrom timesketch.lib.utils import read_and_validate_csv\nfrom timesketch.models import db_session\nfrom timesketch.models.sketch import SearchIndex\n\ncelery = create_celery_app()\n\n\ndef get_data_location():\n \"\"\"Path to the plaso data directory.\n\n Returns:\n The path to where the plaso data directory is or None if not existing.\n \"\"\"\n data_location = current_app.config.get(u'PLASO_DATA_LOCATION', None)\n if not data_location:\n data_location = os.path.join(sys.prefix, u'share', u'plaso')\n if not os.path.exists(data_location):\n data_location = None\n return data_location\n\n\n@celery.task(track_started=True)\ndef run_plaso(source_file_path, timeline_name, index_name, username=None):\n \"\"\"Create a Celery task for processing Plaso storage file.\n\n Args:\n source_file_path: Path to plaso storage file.\n timeline_name: Name of the Timesketch timeline.\n index_name: Name of the datastore index.\n username: Username of the user who will own the timeline.\n\n Returns:\n Dictionary with count of processed events.\n \"\"\"\n plaso_data_location = get_data_location()\n flush_interval = 1000 # events to queue before bulk index\n doc_type = u'plaso_event' # Document type for Elasticsearch\n\n # Use Plaso psort frontend tool.\n frontend = psort.PsortFrontend()\n frontend.SetDataLocation(plaso_data_location)\n storage_reader = frontend.CreateStorageReader(source_file_path)\n\n # Setup the Timesketch output module.\n output_module = frontend.CreateOutputModule(u'timesketch')\n output_module.SetIndexName(index_name)\n output_module.SetTimelineName(timeline_name)\n output_module.SetFlushInterval(flush_interval)\n output_module.SetDocType(doc_type)\n if username:\n output_module.SetUserName(username)\n\n # Start process the Plaso storage file.\n counter = frontend.ExportEvents(storage_reader, output_module)\n\n return dict(counter)\n\n\n@celery.task(track_started=True)\ndef run_csv(source_file_path, timeline_name, index_name, username=None):\n \"\"\"Create a Celery task for processing a CSV file.\n\n Args:\n source_file_path: Path to CSV file.\n timeline_name: Name of the Timesketch timeline.\n index_name: Name of the datastore index.\n\n Returns:\n Dictionary with count of processed events.\n \"\"\"\n flush_interval = 1000 # events to queue before bulk index\n event_type = u'generic_event' # Document type for Elasticsearch\n\n # Log information to Celery\n logging.info(u'Index name: %s', index_name)\n logging.info(u'Timeline name: %s', timeline_name)\n logging.info(u'Flush interval: %d', flush_interval)\n logging.info(u'Document type: %s', event_type)\n logging.info(u'Owner: %s', username)\n\n es = ElasticsearchDataStore(\n host=current_app.config[u'ELASTIC_HOST'],\n port=current_app.config[u'ELASTIC_PORT'])\n\n es.create_index(index_name=index_name, doc_type=event_type)\n for event in read_and_validate_csv(source_file_path):\n es.import_event(\n flush_interval, index_name, event_type, event)\n\n # Import the remaining events\n total_events = es.import_event(flush_interval, index_name, event_type)\n\n # We are done so let's remove the processing status flag\n with celery.app.app_context():\n search_index = SearchIndex.query.filter_by(index_name=index_name).first()\n 
search_index.status.remove(search_index.status[0])\n db_session.add(search_index)\n db_session.commit()\n\n return {u'Events processed': total_events}\n", "path": "timesketch/lib/tasks.py"}]}
1,621
193
gh_patches_debug_39306
rasdani/github-patches
git_diff
piskvorky__gensim-2245
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- wiki examples: sample code to get from tfidf doc to wikipedia title/uri and vice versa I very much like the LSI and LDA [wiki examples](https://github.com/RaRe-Technologies/gensim/blob/develop/docs/src/wiki.rst), but one aspect that i think is missing is: how to get from tf-idf doc vectors (or later LSI / LDA vecs) back to Wikipedia URIs (or titles if easier) and vice versa? Am i missing something obvious, or do i have to run another pass over the wiki dump, as the titles aren't saved anywhere? I'll happily make a PR to extend the examples with this... --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gensim/scripts/make_wikicorpus.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz> 5 # Copyright (C) 2012 Lars Buitinck <larsmans@gmail.com> 6 # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html 7 8 9 """ 10 USAGE: %(program)s WIKI_XML_DUMP OUTPUT_PREFIX [VOCABULARY_SIZE] 11 12 Convert articles from a Wikipedia dump to (sparse) vectors. The input is a 13 bz2-compressed dump of Wikipedia articles, in XML format. 14 15 This actually creates three files: 16 17 * `OUTPUT_PREFIX_wordids.txt`: mapping between words and their integer ids 18 * `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation, in 19 Matrix Matrix format 20 * `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation 21 * `OUTPUT_PREFIX.tfidf_model`: TF-IDF model dump 22 23 The output Matrix Market files can then be compressed (e.g., by bzip2) to save 24 disk space; gensim's corpus iterators can work with compressed input, too. 25 26 `VOCABULARY_SIZE` controls how many of the most frequent words to keep (after 27 removing tokens that appear in more than 10%% of all documents). Defaults to 28 100,000. 29 30 If you have the `pattern` package installed, this script will use a fancy 31 lemmatization to get a lemma of each token (instead of plain alphabetic 32 tokenizer). The package is available at https://github.com/clips/pattern . 33 34 Example: 35 python -m gensim.scripts.make_wikicorpus ~/gensim/results/enwiki-latest-pages-articles.xml.bz2 ~/gensim/results/wiki 36 """ 37 38 39 import logging 40 import os.path 41 import sys 42 43 from gensim.corpora import Dictionary, HashDictionary, MmCorpus, WikiCorpus 44 from gensim.models import TfidfModel 45 46 47 # Wiki is first scanned for all distinct word types (~7M). The types that 48 # appear in more than 10% of articles are removed and from the rest, the 49 # DEFAULT_DICT_SIZE most frequent types are kept. 50 DEFAULT_DICT_SIZE = 100000 51 52 53 if __name__ == '__main__': 54 program = os.path.basename(sys.argv[0]) 55 logger = logging.getLogger(program) 56 57 logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s') 58 logging.root.setLevel(level=logging.INFO) 59 logger.info("running %s", ' '.join(sys.argv)) 60 61 # check and process input arguments 62 if len(sys.argv) < 3: 63 print(globals()['__doc__'] % locals()) 64 sys.exit(1) 65 inp, outp = sys.argv[1:3] 66 67 if not os.path.isdir(os.path.dirname(outp)): 68 raise SystemExit("Error: The output directory does not exist. 
Create the directory and try again.") 69 70 if len(sys.argv) > 3: 71 keep_words = int(sys.argv[3]) 72 else: 73 keep_words = DEFAULT_DICT_SIZE 74 online = 'online' in program 75 lemmatize = 'lemma' in program 76 debug = 'nodebug' not in program 77 78 if online: 79 dictionary = HashDictionary(id_range=keep_words, debug=debug) 80 dictionary.allow_update = True # start collecting document frequencies 81 wiki = WikiCorpus(inp, lemmatize=lemmatize, dictionary=dictionary) 82 # ~4h on my macbook pro without lemmatization, 3.1m articles (august 2012) 83 MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000) 84 # with HashDictionary, the token->id mapping is only fully instantiated now, after `serialize` 85 dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE) 86 dictionary.save_as_text(outp + '_wordids.txt.bz2') 87 wiki.save(outp + '_corpus.pkl.bz2') 88 dictionary.allow_update = False 89 else: 90 wiki = WikiCorpus(inp, lemmatize=lemmatize) # takes about 9h on a macbook pro, for 3.5m articles (june 2011) 91 # only keep the most frequent words (out of total ~8.2m unique tokens) 92 wiki.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE) 93 # save dictionary and bag-of-words (term-document frequency matrix) 94 MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000) # another ~9h 95 wiki.dictionary.save_as_text(outp + '_wordids.txt.bz2') 96 # load back the id->word mapping directly from file 97 # this seems to save more memory, compared to keeping the wiki.dictionary object from above 98 dictionary = Dictionary.load_from_text(outp + '_wordids.txt.bz2') 99 del wiki 100 101 # initialize corpus reader and word->id mapping 102 mm = MmCorpus(outp + '_bow.mm') 103 104 # build tfidf, ~50min 105 tfidf = TfidfModel(mm, id2word=dictionary, normalize=True) 106 tfidf.save(outp + '.tfidf_model') 107 108 # save tfidf vectors in matrix market format 109 # ~4h; result file is 15GB! bzip2'ed down to 4.5GB 110 MmCorpus.serialize(outp + '_tfidf.mm', tfidf[mm], progress_cnt=10000) 111 112 logger.info("finished running %s", program) 113 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gensim/scripts/make_wikicorpus.py b/gensim/scripts/make_wikicorpus.py --- a/gensim/scripts/make_wikicorpus.py +++ b/gensim/scripts/make_wikicorpus.py @@ -12,13 +12,15 @@ Convert articles from a Wikipedia dump to (sparse) vectors. The input is a bz2-compressed dump of Wikipedia articles, in XML format. -This actually creates three files: +This actually creates several files: -* `OUTPUT_PREFIX_wordids.txt`: mapping between words and their integer ids -* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation, in - Matrix Matrix format -* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation -* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model dump +* `OUTPUT_PREFIX_wordids.txt.bz2`: mapping between words and their integer ids +* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation in Matrix Market format +* `OUTPUT_PREFIX_bow.mm.index`: index for `OUTPUT_PREFIX_bow.mm` +* `OUTPUT_PREFIX_bow.mm.metadata.cpickle`: titles of documents +* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation in Matix Market format +* `OUTPUT_PREFIX_tfidf.mm.index`: index for `OUTPUT_PREFIX_tfidf.mm` +* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model The output Matrix Market files can then be compressed (e.g., by bzip2) to save disk space; gensim's corpus iterators can work with compressed input, too. @@ -80,7 +82,7 @@ dictionary.allow_update = True # start collecting document frequencies wiki = WikiCorpus(inp, lemmatize=lemmatize, dictionary=dictionary) # ~4h on my macbook pro without lemmatization, 3.1m articles (august 2012) - MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000) + MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000, metadata=True) # with HashDictionary, the token->id mapping is only fully instantiated now, after `serialize` dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE) dictionary.save_as_text(outp + '_wordids.txt.bz2') @@ -91,7 +93,7 @@ # only keep the most frequent words (out of total ~8.2m unique tokens) wiki.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE) # save dictionary and bag-of-words (term-document frequency matrix) - MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000) # another ~9h + MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000, metadata=True) # another ~9h wiki.dictionary.save_as_text(outp + '_wordids.txt.bz2') # load back the id->word mapping directly from file # this seems to save more memory, compared to keeping the wiki.dictionary object from above
{"golden_diff": "diff --git a/gensim/scripts/make_wikicorpus.py b/gensim/scripts/make_wikicorpus.py\n--- a/gensim/scripts/make_wikicorpus.py\n+++ b/gensim/scripts/make_wikicorpus.py\n@@ -12,13 +12,15 @@\n Convert articles from a Wikipedia dump to (sparse) vectors. The input is a\n bz2-compressed dump of Wikipedia articles, in XML format.\n \n-This actually creates three files:\n+This actually creates several files:\n \n-* `OUTPUT_PREFIX_wordids.txt`: mapping between words and their integer ids\n-* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation, in\n- Matrix Matrix format\n-* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation\n-* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model dump\n+* `OUTPUT_PREFIX_wordids.txt.bz2`: mapping between words and their integer ids\n+* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation in Matrix Market format\n+* `OUTPUT_PREFIX_bow.mm.index`: index for `OUTPUT_PREFIX_bow.mm`\n+* `OUTPUT_PREFIX_bow.mm.metadata.cpickle`: titles of documents\n+* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation in Matix Market format\n+* `OUTPUT_PREFIX_tfidf.mm.index`: index for `OUTPUT_PREFIX_tfidf.mm`\n+* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model\n \n The output Matrix Market files can then be compressed (e.g., by bzip2) to save\n disk space; gensim's corpus iterators can work with compressed input, too.\n@@ -80,7 +82,7 @@\n dictionary.allow_update = True # start collecting document frequencies\n wiki = WikiCorpus(inp, lemmatize=lemmatize, dictionary=dictionary)\n # ~4h on my macbook pro without lemmatization, 3.1m articles (august 2012)\n- MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000)\n+ MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000, metadata=True)\n # with HashDictionary, the token->id mapping is only fully instantiated now, after `serialize`\n dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)\n dictionary.save_as_text(outp + '_wordids.txt.bz2')\n@@ -91,7 +93,7 @@\n # only keep the most frequent words (out of total ~8.2m unique tokens)\n wiki.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)\n # save dictionary and bag-of-words (term-document frequency matrix)\n- MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000) # another ~9h\n+ MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000, metadata=True) # another ~9h\n wiki.dictionary.save_as_text(outp + '_wordids.txt.bz2')\n # load back the id->word mapping directly from file\n # this seems to save more memory, compared to keeping the wiki.dictionary object from above\n", "issue": "wiki examples: sample code to get from tfidf doc to wikipedia title/uri and vice versa\nI very much like the LSI and LDA [wiki examples](https://github.com/RaRe-Technologies/gensim/blob/develop/docs/src/wiki.rst), but one aspect that i think is missing is: how to get from tf-idf doc vectors (or later LSI / LDA vecs) back to Wikipedia URIs (or titles if easier) and vice versa?\r\n\r\nAm i missing something obvious, or do i have to run another pass over the wiki dump, as the titles aren't saved anywhere?\r\n\r\nI'll happily make a PR to extend the examples with this...\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>\n# Copyright (C) 2012 Lars Buitinck <larsmans@gmail.com>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\n\"\"\"\nUSAGE: %(program)s WIKI_XML_DUMP OUTPUT_PREFIX 
[VOCABULARY_SIZE]\n\nConvert articles from a Wikipedia dump to (sparse) vectors. The input is a\nbz2-compressed dump of Wikipedia articles, in XML format.\n\nThis actually creates three files:\n\n* `OUTPUT_PREFIX_wordids.txt`: mapping between words and their integer ids\n* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation, in\n Matrix Matrix format\n* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation\n* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model dump\n\nThe output Matrix Market files can then be compressed (e.g., by bzip2) to save\ndisk space; gensim's corpus iterators can work with compressed input, too.\n\n`VOCABULARY_SIZE` controls how many of the most frequent words to keep (after\nremoving tokens that appear in more than 10%% of all documents). Defaults to\n100,000.\n\nIf you have the `pattern` package installed, this script will use a fancy\nlemmatization to get a lemma of each token (instead of plain alphabetic\ntokenizer). The package is available at https://github.com/clips/pattern .\n\nExample:\n python -m gensim.scripts.make_wikicorpus ~/gensim/results/enwiki-latest-pages-articles.xml.bz2 ~/gensim/results/wiki\n\"\"\"\n\n\nimport logging\nimport os.path\nimport sys\n\nfrom gensim.corpora import Dictionary, HashDictionary, MmCorpus, WikiCorpus\nfrom gensim.models import TfidfModel\n\n\n# Wiki is first scanned for all distinct word types (~7M). The types that\n# appear in more than 10% of articles are removed and from the rest, the\n# DEFAULT_DICT_SIZE most frequent types are kept.\nDEFAULT_DICT_SIZE = 100000\n\n\nif __name__ == '__main__':\n program = os.path.basename(sys.argv[0])\n logger = logging.getLogger(program)\n\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')\n logging.root.setLevel(level=logging.INFO)\n logger.info(\"running %s\", ' '.join(sys.argv))\n\n # check and process input arguments\n if len(sys.argv) < 3:\n print(globals()['__doc__'] % locals())\n sys.exit(1)\n inp, outp = sys.argv[1:3]\n\n if not os.path.isdir(os.path.dirname(outp)):\n raise SystemExit(\"Error: The output directory does not exist. 
Create the directory and try again.\")\n\n if len(sys.argv) > 3:\n keep_words = int(sys.argv[3])\n else:\n keep_words = DEFAULT_DICT_SIZE\n online = 'online' in program\n lemmatize = 'lemma' in program\n debug = 'nodebug' not in program\n\n if online:\n dictionary = HashDictionary(id_range=keep_words, debug=debug)\n dictionary.allow_update = True # start collecting document frequencies\n wiki = WikiCorpus(inp, lemmatize=lemmatize, dictionary=dictionary)\n # ~4h on my macbook pro without lemmatization, 3.1m articles (august 2012)\n MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000)\n # with HashDictionary, the token->id mapping is only fully instantiated now, after `serialize`\n dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)\n dictionary.save_as_text(outp + '_wordids.txt.bz2')\n wiki.save(outp + '_corpus.pkl.bz2')\n dictionary.allow_update = False\n else:\n wiki = WikiCorpus(inp, lemmatize=lemmatize) # takes about 9h on a macbook pro, for 3.5m articles (june 2011)\n # only keep the most frequent words (out of total ~8.2m unique tokens)\n wiki.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)\n # save dictionary and bag-of-words (term-document frequency matrix)\n MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000) # another ~9h\n wiki.dictionary.save_as_text(outp + '_wordids.txt.bz2')\n # load back the id->word mapping directly from file\n # this seems to save more memory, compared to keeping the wiki.dictionary object from above\n dictionary = Dictionary.load_from_text(outp + '_wordids.txt.bz2')\n del wiki\n\n # initialize corpus reader and word->id mapping\n mm = MmCorpus(outp + '_bow.mm')\n\n # build tfidf, ~50min\n tfidf = TfidfModel(mm, id2word=dictionary, normalize=True)\n tfidf.save(outp + '.tfidf_model')\n\n # save tfidf vectors in matrix market format\n # ~4h; result file is 15GB! bzip2'ed down to 4.5GB\n MmCorpus.serialize(outp + '_tfidf.mm', tfidf[mm], progress_cnt=10000)\n\n logger.info(\"finished running %s\", program)\n", "path": "gensim/scripts/make_wikicorpus.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>\n# Copyright (C) 2012 Lars Buitinck <larsmans@gmail.com>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\n\"\"\"\nUSAGE: %(program)s WIKI_XML_DUMP OUTPUT_PREFIX [VOCABULARY_SIZE]\n\nConvert articles from a Wikipedia dump to (sparse) vectors. The input is a\nbz2-compressed dump of Wikipedia articles, in XML format.\n\nThis actually creates several files:\n\n* `OUTPUT_PREFIX_wordids.txt.bz2`: mapping between words and their integer ids\n* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation in Matrix Market format\n* `OUTPUT_PREFIX_bow.mm.index`: index for `OUTPUT_PREFIX_bow.mm`\n* `OUTPUT_PREFIX_bow.mm.metadata.cpickle`: titles of documents\n* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation in Matix Market format\n* `OUTPUT_PREFIX_tfidf.mm.index`: index for `OUTPUT_PREFIX_tfidf.mm`\n* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model\n\nThe output Matrix Market files can then be compressed (e.g., by bzip2) to save\ndisk space; gensim's corpus iterators can work with compressed input, too.\n\n`VOCABULARY_SIZE` controls how many of the most frequent words to keep (after\nremoving tokens that appear in more than 10%% of all documents). 
Defaults to\n100,000.\n\nIf you have the `pattern` package installed, this script will use a fancy\nlemmatization to get a lemma of each token (instead of plain alphabetic\ntokenizer). The package is available at https://github.com/clips/pattern .\n\nExample:\n python -m gensim.scripts.make_wikicorpus ~/gensim/results/enwiki-latest-pages-articles.xml.bz2 ~/gensim/results/wiki\n\"\"\"\n\n\nimport logging\nimport os.path\nimport sys\n\nfrom gensim.corpora import Dictionary, HashDictionary, MmCorpus, WikiCorpus\nfrom gensim.models import TfidfModel\n\n\n# Wiki is first scanned for all distinct word types (~7M). The types that\n# appear in more than 10% of articles are removed and from the rest, the\n# DEFAULT_DICT_SIZE most frequent types are kept.\nDEFAULT_DICT_SIZE = 100000\n\n\nif __name__ == '__main__':\n program = os.path.basename(sys.argv[0])\n logger = logging.getLogger(program)\n\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')\n logging.root.setLevel(level=logging.INFO)\n logger.info(\"running %s\", ' '.join(sys.argv))\n\n # check and process input arguments\n if len(sys.argv) < 3:\n print(globals()['__doc__'] % locals())\n sys.exit(1)\n inp, outp = sys.argv[1:3]\n\n if not os.path.isdir(os.path.dirname(outp)):\n raise SystemExit(\"Error: The output directory does not exist. Create the directory and try again.\")\n\n if len(sys.argv) > 3:\n keep_words = int(sys.argv[3])\n else:\n keep_words = DEFAULT_DICT_SIZE\n online = 'online' in program\n lemmatize = 'lemma' in program\n debug = 'nodebug' not in program\n\n if online:\n dictionary = HashDictionary(id_range=keep_words, debug=debug)\n dictionary.allow_update = True # start collecting document frequencies\n wiki = WikiCorpus(inp, lemmatize=lemmatize, dictionary=dictionary)\n # ~4h on my macbook pro without lemmatization, 3.1m articles (august 2012)\n MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000, metadata=True)\n # with HashDictionary, the token->id mapping is only fully instantiated now, after `serialize`\n dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)\n dictionary.save_as_text(outp + '_wordids.txt.bz2')\n wiki.save(outp + '_corpus.pkl.bz2')\n dictionary.allow_update = False\n else:\n wiki = WikiCorpus(inp, lemmatize=lemmatize) # takes about 9h on a macbook pro, for 3.5m articles (june 2011)\n # only keep the most frequent words (out of total ~8.2m unique tokens)\n wiki.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)\n # save dictionary and bag-of-words (term-document frequency matrix)\n MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000, metadata=True) # another ~9h\n wiki.dictionary.save_as_text(outp + '_wordids.txt.bz2')\n # load back the id->word mapping directly from file\n # this seems to save more memory, compared to keeping the wiki.dictionary object from above\n dictionary = Dictionary.load_from_text(outp + '_wordids.txt.bz2')\n del wiki\n\n # initialize corpus reader and word->id mapping\n mm = MmCorpus(outp + '_bow.mm')\n\n # build tfidf, ~50min\n tfidf = TfidfModel(mm, id2word=dictionary, normalize=True)\n tfidf.save(outp + '.tfidf_model')\n\n # save tfidf vectors in matrix market format\n # ~4h; result file is 15GB! bzip2'ed down to 4.5GB\n MmCorpus.serialize(outp + '_tfidf.mm', tfidf[mm], progress_cnt=10000)\n\n logger.info(\"finished running %s\", program)\n", "path": "gensim/scripts/make_wikicorpus.py"}]}
1,900
736
gh_patches_debug_28738
rasdani/github-patches
git_diff
mirumee__ariadne-661
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- snake_case_fallback_resolvers not calling obj.get(attr_name) **Ariadne version:** 0.13.0 **Python version:** 3.8.11 Hello. I am using the [databases](https://www.encode.io/databases/) package with an [asyncpg](https://magicstack.github.io/asyncpg/current/) backend to interact with a PostgreSQL database. The objects returned from my queries are of the type `databases.backends.postgres.Record`. The desired attributes can only can accessed via the get method. However, when I use `snake_case_fallback_resolvers`, Ariadne has trouble resolving the requested fields and I receive the following error: `Cannot return null for non-nullable field` If I instead use the regular `fallback_resolvers` (adjusting my schema's naming conventions), Ariadne is able to resolve the requested fields. Is this a bug or am I doing something wrong? Thank you for your time. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ariadne/utils.py` Content: ``` 1 import asyncio 2 from functools import wraps 3 from typing import Optional, Union, Callable, Dict, Any 4 5 from graphql import GraphQLError, parse 6 7 8 def convert_camel_case_to_snake(graphql_name: str) -> str: 9 # pylint: disable=too-many-boolean-expressions 10 max_index = len(graphql_name) - 1 11 lowered_name = graphql_name.lower() 12 13 python_name = "" 14 for i, c in enumerate(lowered_name): 15 if i > 0 and ( 16 # testWord -> test_word 17 ( 18 c != graphql_name[i] 19 and graphql_name[i - 1] != "_" 20 and graphql_name[i - 1] == python_name[-1] 21 ) 22 # TESTWord -> test_word 23 or ( 24 i < max_index 25 and graphql_name[i] != lowered_name[i] 26 and graphql_name[i + 1] == lowered_name[i + 1] 27 ) 28 # test134 -> test_134 29 or (c.isdigit() and not graphql_name[i - 1].isdigit()) 30 # 134test -> 134_test 31 or (not c.isdigit() and graphql_name[i - 1].isdigit()) 32 ): 33 python_name += "_" 34 python_name += c 35 return python_name 36 37 38 def gql(value: str) -> str: 39 parse(value) 40 return value 41 42 43 def unwrap_graphql_error( 44 error: Union[GraphQLError, Optional[Exception]] 45 ) -> Optional[Exception]: 46 if isinstance(error, GraphQLError): 47 return unwrap_graphql_error(error.original_error) 48 return error 49 50 51 def convert_kwargs_to_snake_case(func: Callable) -> Callable: 52 def convert_to_snake_case(d: Dict) -> Dict: 53 converted: Dict = {} 54 for k, v in d.items(): 55 if isinstance(v, dict): 56 v = convert_to_snake_case(v) 57 if isinstance(v, list): 58 v = [convert_to_snake_case(i) if isinstance(i, dict) else i for i in v] 59 converted[convert_camel_case_to_snake(k)] = v 60 return converted 61 62 if asyncio.iscoroutinefunction(func): 63 64 @wraps(func) 65 async def async_wrapper(*args: Any, **kwargs: Any) -> Any: 66 return await func(*args, **convert_to_snake_case(kwargs)) 67 68 return async_wrapper 69 70 @wraps(func) 71 def wrapper(*args: Any, **kwargs: Any) -> Any: 72 return func(*args, **convert_to_snake_case(kwargs)) 73 74 return wrapper 75 ``` Path: `ariadne/resolvers.py` Content: ``` 1 from typing import Any 2 3 from graphql import default_field_resolver 4 from graphql.type import ( 5 GraphQLField, 6 GraphQLObjectType, 7 GraphQLResolveInfo, 8 GraphQLSchema, 9 ) 10 11 from .types import Resolver, SchemaBindable 12 from .utils import convert_camel_case_to_snake 13 14 15 class FallbackResolversSetter(SchemaBindable): 16 def bind_to_schema(self, 
schema: GraphQLSchema) -> None: 17 for type_object in schema.type_map.values(): 18 if isinstance(type_object, GraphQLObjectType): 19 self.add_resolvers_to_object_fields(type_object) 20 21 def add_resolvers_to_object_fields(self, type_object) -> None: 22 for field_name, field_object in type_object.fields.items(): 23 self.add_resolver_to_field(field_name, field_object) 24 25 def add_resolver_to_field(self, _: str, field_object: GraphQLField) -> None: 26 if field_object.resolve is None: 27 field_object.resolve = default_field_resolver 28 29 30 class SnakeCaseFallbackResolversSetter(FallbackResolversSetter): 31 def add_resolver_to_field( 32 self, field_name: str, field_object: GraphQLField 33 ) -> None: 34 if field_object.resolve is None: 35 field_name = convert_camel_case_to_snake(field_name) 36 field_object.resolve = resolve_to(field_name) 37 38 39 fallback_resolvers = FallbackResolversSetter() 40 snake_case_fallback_resolvers = SnakeCaseFallbackResolversSetter() 41 42 43 def resolve_parent_field(parent: Any, field_name: str) -> Any: 44 if isinstance(parent, dict): 45 return parent.get(field_name) 46 return getattr(parent, field_name, None) 47 48 49 def resolve_to(field_name: str) -> Resolver: 50 def resolver(parent: Any, info: GraphQLResolveInfo, **kwargs) -> Any: 51 value = resolve_parent_field(parent, field_name) 52 if callable(value): 53 return value(info, **kwargs) 54 return value 55 56 # pylint: disable=protected-access 57 resolver._ariadne_alias_resolver = True # type: ignore 58 return resolver 59 60 61 def is_default_resolver(resolver: Resolver) -> bool: 62 # pylint: disable=comparison-with-callable 63 if resolver == default_field_resolver: 64 return True 65 return hasattr(resolver, "_ariadne_alias_resolver") 66 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ariadne/resolvers.py b/ariadne/resolvers.py --- a/ariadne/resolvers.py +++ b/ariadne/resolvers.py @@ -1,3 +1,4 @@ +from collections.abc import Mapping from typing import Any from graphql import default_field_resolver @@ -41,7 +42,7 @@ def resolve_parent_field(parent: Any, field_name: str) -> Any: - if isinstance(parent, dict): + if isinstance(parent, Mapping): return parent.get(field_name) return getattr(parent, field_name, None) diff --git a/ariadne/utils.py b/ariadne/utils.py --- a/ariadne/utils.py +++ b/ariadne/utils.py @@ -1,4 +1,5 @@ import asyncio +from collections.abc import Mapping from functools import wraps from typing import Optional, Union, Callable, Dict, Any @@ -49,13 +50,15 @@ def convert_kwargs_to_snake_case(func: Callable) -> Callable: - def convert_to_snake_case(d: Dict) -> Dict: + def convert_to_snake_case(m: Mapping) -> Dict: converted: Dict = {} - for k, v in d.items(): - if isinstance(v, dict): + for k, v in m.items(): + if isinstance(v, Mapping): v = convert_to_snake_case(v) if isinstance(v, list): - v = [convert_to_snake_case(i) if isinstance(i, dict) else i for i in v] + v = [ + convert_to_snake_case(i) if isinstance(i, Mapping) else i for i in v + ] converted[convert_camel_case_to_snake(k)] = v return converted
{"golden_diff": "diff --git a/ariadne/resolvers.py b/ariadne/resolvers.py\n--- a/ariadne/resolvers.py\n+++ b/ariadne/resolvers.py\n@@ -1,3 +1,4 @@\n+from collections.abc import Mapping\n from typing import Any\n \n from graphql import default_field_resolver\n@@ -41,7 +42,7 @@\n \n \n def resolve_parent_field(parent: Any, field_name: str) -> Any:\n- if isinstance(parent, dict):\n+ if isinstance(parent, Mapping):\n return parent.get(field_name)\n return getattr(parent, field_name, None)\n \ndiff --git a/ariadne/utils.py b/ariadne/utils.py\n--- a/ariadne/utils.py\n+++ b/ariadne/utils.py\n@@ -1,4 +1,5 @@\n import asyncio\n+from collections.abc import Mapping\n from functools import wraps\n from typing import Optional, Union, Callable, Dict, Any\n \n@@ -49,13 +50,15 @@\n \n \n def convert_kwargs_to_snake_case(func: Callable) -> Callable:\n- def convert_to_snake_case(d: Dict) -> Dict:\n+ def convert_to_snake_case(m: Mapping) -> Dict:\n converted: Dict = {}\n- for k, v in d.items():\n- if isinstance(v, dict):\n+ for k, v in m.items():\n+ if isinstance(v, Mapping):\n v = convert_to_snake_case(v)\n if isinstance(v, list):\n- v = [convert_to_snake_case(i) if isinstance(i, dict) else i for i in v]\n+ v = [\n+ convert_to_snake_case(i) if isinstance(i, Mapping) else i for i in v\n+ ]\n converted[convert_camel_case_to_snake(k)] = v\n return converted\n", "issue": "snake_case_fallback_resolvers not calling obj.get(attr_name)\n**Ariadne version:** 0.13.0\r\n**Python version:** 3.8.11\r\n\r\nHello. I am using the [databases](https://www.encode.io/databases/) package with an [asyncpg](https://magicstack.github.io/asyncpg/current/) backend to interact with a PostgreSQL database. The objects returned from my queries are of the type `databases.backends.postgres.Record`. The desired attributes can only can accessed via the get method. However, when I use `snake_case_fallback_resolvers`, Ariadne has trouble resolving the requested fields and I receive the following error: `Cannot return null for non-nullable field`\r\n\r\nIf I instead use the regular `fallback_resolvers` (adjusting my schema's naming conventions), Ariadne is able to resolve the requested fields.\r\n\r\nIs this a bug or am I doing something wrong? 
Thank you for your time.\r\n\n", "before_files": [{"content": "import asyncio\nfrom functools import wraps\nfrom typing import Optional, Union, Callable, Dict, Any\n\nfrom graphql import GraphQLError, parse\n\n\ndef convert_camel_case_to_snake(graphql_name: str) -> str:\n # pylint: disable=too-many-boolean-expressions\n max_index = len(graphql_name) - 1\n lowered_name = graphql_name.lower()\n\n python_name = \"\"\n for i, c in enumerate(lowered_name):\n if i > 0 and (\n # testWord -> test_word\n (\n c != graphql_name[i]\n and graphql_name[i - 1] != \"_\"\n and graphql_name[i - 1] == python_name[-1]\n )\n # TESTWord -> test_word\n or (\n i < max_index\n and graphql_name[i] != lowered_name[i]\n and graphql_name[i + 1] == lowered_name[i + 1]\n )\n # test134 -> test_134\n or (c.isdigit() and not graphql_name[i - 1].isdigit())\n # 134test -> 134_test\n or (not c.isdigit() and graphql_name[i - 1].isdigit())\n ):\n python_name += \"_\"\n python_name += c\n return python_name\n\n\ndef gql(value: str) -> str:\n parse(value)\n return value\n\n\ndef unwrap_graphql_error(\n error: Union[GraphQLError, Optional[Exception]]\n) -> Optional[Exception]:\n if isinstance(error, GraphQLError):\n return unwrap_graphql_error(error.original_error)\n return error\n\n\ndef convert_kwargs_to_snake_case(func: Callable) -> Callable:\n def convert_to_snake_case(d: Dict) -> Dict:\n converted: Dict = {}\n for k, v in d.items():\n if isinstance(v, dict):\n v = convert_to_snake_case(v)\n if isinstance(v, list):\n v = [convert_to_snake_case(i) if isinstance(i, dict) else i for i in v]\n converted[convert_camel_case_to_snake(k)] = v\n return converted\n\n if asyncio.iscoroutinefunction(func):\n\n @wraps(func)\n async def async_wrapper(*args: Any, **kwargs: Any) -> Any:\n return await func(*args, **convert_to_snake_case(kwargs))\n\n return async_wrapper\n\n @wraps(func)\n def wrapper(*args: Any, **kwargs: Any) -> Any:\n return func(*args, **convert_to_snake_case(kwargs))\n\n return wrapper\n", "path": "ariadne/utils.py"}, {"content": "from typing import Any\n\nfrom graphql import default_field_resolver\nfrom graphql.type import (\n GraphQLField,\n GraphQLObjectType,\n GraphQLResolveInfo,\n GraphQLSchema,\n)\n\nfrom .types import Resolver, SchemaBindable\nfrom .utils import convert_camel_case_to_snake\n\n\nclass FallbackResolversSetter(SchemaBindable):\n def bind_to_schema(self, schema: GraphQLSchema) -> None:\n for type_object in schema.type_map.values():\n if isinstance(type_object, GraphQLObjectType):\n self.add_resolvers_to_object_fields(type_object)\n\n def add_resolvers_to_object_fields(self, type_object) -> None:\n for field_name, field_object in type_object.fields.items():\n self.add_resolver_to_field(field_name, field_object)\n\n def add_resolver_to_field(self, _: str, field_object: GraphQLField) -> None:\n if field_object.resolve is None:\n field_object.resolve = default_field_resolver\n\n\nclass SnakeCaseFallbackResolversSetter(FallbackResolversSetter):\n def add_resolver_to_field(\n self, field_name: str, field_object: GraphQLField\n ) -> None:\n if field_object.resolve is None:\n field_name = convert_camel_case_to_snake(field_name)\n field_object.resolve = resolve_to(field_name)\n\n\nfallback_resolvers = FallbackResolversSetter()\nsnake_case_fallback_resolvers = SnakeCaseFallbackResolversSetter()\n\n\ndef resolve_parent_field(parent: Any, field_name: str) -> Any:\n if isinstance(parent, dict):\n return parent.get(field_name)\n return getattr(parent, field_name, None)\n\n\ndef resolve_to(field_name: str) -> 
Resolver:\n def resolver(parent: Any, info: GraphQLResolveInfo, **kwargs) -> Any:\n value = resolve_parent_field(parent, field_name)\n if callable(value):\n return value(info, **kwargs)\n return value\n\n # pylint: disable=protected-access\n resolver._ariadne_alias_resolver = True # type: ignore\n return resolver\n\n\ndef is_default_resolver(resolver: Resolver) -> bool:\n # pylint: disable=comparison-with-callable\n if resolver == default_field_resolver:\n return True\n return hasattr(resolver, \"_ariadne_alias_resolver\")\n", "path": "ariadne/resolvers.py"}], "after_files": [{"content": "import asyncio\nfrom collections.abc import Mapping\nfrom functools import wraps\nfrom typing import Optional, Union, Callable, Dict, Any\n\nfrom graphql import GraphQLError, parse\n\n\ndef convert_camel_case_to_snake(graphql_name: str) -> str:\n # pylint: disable=too-many-boolean-expressions\n max_index = len(graphql_name) - 1\n lowered_name = graphql_name.lower()\n\n python_name = \"\"\n for i, c in enumerate(lowered_name):\n if i > 0 and (\n # testWord -> test_word\n (\n c != graphql_name[i]\n and graphql_name[i - 1] != \"_\"\n and graphql_name[i - 1] == python_name[-1]\n )\n # TESTWord -> test_word\n or (\n i < max_index\n and graphql_name[i] != lowered_name[i]\n and graphql_name[i + 1] == lowered_name[i + 1]\n )\n # test134 -> test_134\n or (c.isdigit() and not graphql_name[i - 1].isdigit())\n # 134test -> 134_test\n or (not c.isdigit() and graphql_name[i - 1].isdigit())\n ):\n python_name += \"_\"\n python_name += c\n return python_name\n\n\ndef gql(value: str) -> str:\n parse(value)\n return value\n\n\ndef unwrap_graphql_error(\n error: Union[GraphQLError, Optional[Exception]]\n) -> Optional[Exception]:\n if isinstance(error, GraphQLError):\n return unwrap_graphql_error(error.original_error)\n return error\n\n\ndef convert_kwargs_to_snake_case(func: Callable) -> Callable:\n def convert_to_snake_case(m: Mapping) -> Dict:\n converted: Dict = {}\n for k, v in m.items():\n if isinstance(v, Mapping):\n v = convert_to_snake_case(v)\n if isinstance(v, list):\n v = [\n convert_to_snake_case(i) if isinstance(i, Mapping) else i for i in v\n ]\n converted[convert_camel_case_to_snake(k)] = v\n return converted\n\n if asyncio.iscoroutinefunction(func):\n\n @wraps(func)\n async def async_wrapper(*args: Any, **kwargs: Any) -> Any:\n return await func(*args, **convert_to_snake_case(kwargs))\n\n return async_wrapper\n\n @wraps(func)\n def wrapper(*args: Any, **kwargs: Any) -> Any:\n return func(*args, **convert_to_snake_case(kwargs))\n\n return wrapper\n", "path": "ariadne/utils.py"}, {"content": "from collections.abc import Mapping\nfrom typing import Any\n\nfrom graphql import default_field_resolver\nfrom graphql.type import (\n GraphQLField,\n GraphQLObjectType,\n GraphQLResolveInfo,\n GraphQLSchema,\n)\n\nfrom .types import Resolver, SchemaBindable\nfrom .utils import convert_camel_case_to_snake\n\n\nclass FallbackResolversSetter(SchemaBindable):\n def bind_to_schema(self, schema: GraphQLSchema) -> None:\n for type_object in schema.type_map.values():\n if isinstance(type_object, GraphQLObjectType):\n self.add_resolvers_to_object_fields(type_object)\n\n def add_resolvers_to_object_fields(self, type_object) -> None:\n for field_name, field_object in type_object.fields.items():\n self.add_resolver_to_field(field_name, field_object)\n\n def add_resolver_to_field(self, _: str, field_object: GraphQLField) -> None:\n if field_object.resolve is None:\n field_object.resolve = default_field_resolver\n\n\nclass 
SnakeCaseFallbackResolversSetter(FallbackResolversSetter):\n def add_resolver_to_field(\n self, field_name: str, field_object: GraphQLField\n ) -> None:\n if field_object.resolve is None:\n field_name = convert_camel_case_to_snake(field_name)\n field_object.resolve = resolve_to(field_name)\n\n\nfallback_resolvers = FallbackResolversSetter()\nsnake_case_fallback_resolvers = SnakeCaseFallbackResolversSetter()\n\n\ndef resolve_parent_field(parent: Any, field_name: str) -> Any:\n if isinstance(parent, Mapping):\n return parent.get(field_name)\n return getattr(parent, field_name, None)\n\n\ndef resolve_to(field_name: str) -> Resolver:\n def resolver(parent: Any, info: GraphQLResolveInfo, **kwargs) -> Any:\n value = resolve_parent_field(parent, field_name)\n if callable(value):\n return value(info, **kwargs)\n return value\n\n # pylint: disable=protected-access\n resolver._ariadne_alias_resolver = True # type: ignore\n return resolver\n\n\ndef is_default_resolver(resolver: Resolver) -> bool:\n # pylint: disable=comparison-with-callable\n if resolver == default_field_resolver:\n return True\n return hasattr(resolver, \"_ariadne_alias_resolver\")\n", "path": "ariadne/resolvers.py"}]}
1,781
400
gh_patches_debug_16378
rasdani/github-patches
git_diff
freedomofpress__securedrop-379
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Display number of docs and messages per source in source list > At the moment each source in the list displays: source codename, last updated. It would be helpful to also see: total # of messages/docs. Extracted from #322 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `securedrop/db.py` Content: ``` 1 import os 2 import datetime 3 4 from sqlalchemy import create_engine, ForeignKey 5 from sqlalchemy.orm import scoped_session, sessionmaker, relationship, backref 6 from sqlalchemy.ext.declarative import declarative_base 7 from sqlalchemy import Column, Integer, String, Boolean, DateTime 8 from sqlalchemy.orm.exc import NoResultFound 9 10 import config 11 import crypto_util 12 import store 13 14 # http://flask.pocoo.org/docs/patterns/sqlalchemy/ 15 16 if config.DATABASE_ENGINE == "sqlite": 17 engine = create_engine( 18 config.DATABASE_ENGINE + ":///" + 19 config.DATABASE_FILE 20 ) 21 else: 22 engine = create_engine( 23 config.DATABASE_ENGINE + '://' + 24 config.DATABASE_USERNAME + ':' + 25 config.DATABASE_PASSWORD + '@' + 26 config.DATABASE_HOST + '/' + 27 config.DATABASE_NAME, echo=False 28 ) 29 30 db_session = scoped_session(sessionmaker(autocommit=False, 31 autoflush=False, 32 bind=engine)) 33 Base = declarative_base() 34 Base.query = db_session.query_property() 35 36 37 class Source(Base): 38 __tablename__ = 'sources' 39 id = Column(Integer, primary_key=True) 40 filesystem_id = Column(String(96), unique=True) 41 journalist_designation = Column(String(255), nullable=False) 42 flagged = Column(Boolean, default=False) 43 last_updated = Column(DateTime, default=datetime.datetime.now) 44 45 # sources are "pending" and don't get displayed to journalists until they submit something 46 pending = Column(Boolean, default=True) 47 48 # keep track of how many interactions have happened, for filenames 49 interaction_count = Column(Integer, default=0, nullable=False) 50 51 def __init__(self, filesystem_id=None, journalist_designation=None): 52 self.filesystem_id = filesystem_id 53 self.journalist_designation = journalist_designation 54 55 def __repr__(self): 56 return '<Source %r>' % (self.journalist_designation) 57 58 def journalist_filename(self): 59 valid_chars = 'abcdefghijklmnopqrstuvwxyz1234567890-_' 60 return ''.join([c for c in self.journalist_designation.lower().replace(' ', '_') if c in valid_chars]) 61 62 class Submission(Base): 63 __tablename__ = 'submissions' 64 id = Column(Integer, primary_key=True) 65 source_id = Column(Integer, ForeignKey('sources.id')) 66 source = relationship("Source", backref=backref('submissions', order_by=id)) 67 filename = Column(String(255), nullable=False) 68 size = Column(Integer, nullable=False) 69 70 def __init__(self, source, filename): 71 self.source_id = source.id 72 self.filename = filename 73 self.size = os.stat(store.path(source.filesystem_id, filename)).st_size 74 75 def __repr__(self): 76 return '<Submission %r>' % (self.filename) 77 78 79 # Declare (or import) models before init_db 80 def init_db(): 81 Base.metadata.create_all(bind=engine) 82 83 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/securedrop/db.py b/securedrop/db.py --- a/securedrop/db.py +++ b/securedrop/db.py @@ -59,6 +59,19 @@ valid_chars = 'abcdefghijklmnopqrstuvwxyz1234567890-_' return ''.join([c for c in self.journalist_designation.lower().replace(' ', '_') if c in valid_chars]) + def documents_messages_count(self): + try: + return self.docs_msgs_count + except AttributeError: + self.docs_msgs_count = {'messages': 0, 'documents': 0} + for submission in self.submissions: + if submission.filename.endswith('msg.gpg'): + self.docs_msgs_count['messages'] += 1 + elif submission.filename.endswith('doc.zip.gpg'): + self.docs_msgs_count['documents'] += 1 + return self.docs_msgs_count + + class Submission(Base): __tablename__ = 'submissions' id = Column(Integer, primary_key=True)
{"golden_diff": "diff --git a/securedrop/db.py b/securedrop/db.py\n--- a/securedrop/db.py\n+++ b/securedrop/db.py\n@@ -59,6 +59,19 @@\n valid_chars = 'abcdefghijklmnopqrstuvwxyz1234567890-_'\n return ''.join([c for c in self.journalist_designation.lower().replace(' ', '_') if c in valid_chars])\n \n+ def documents_messages_count(self):\n+ try:\n+ return self.docs_msgs_count\n+ except AttributeError:\n+ self.docs_msgs_count = {'messages': 0, 'documents': 0}\n+ for submission in self.submissions:\n+ if submission.filename.endswith('msg.gpg'):\n+ self.docs_msgs_count['messages'] += 1\n+ elif submission.filename.endswith('doc.zip.gpg'):\n+ self.docs_msgs_count['documents'] += 1\n+ return self.docs_msgs_count\n+\n+\n class Submission(Base):\n __tablename__ = 'submissions'\n id = Column(Integer, primary_key=True)\n", "issue": "Display number of docs and messages per source in source list\n> At the moment each source in the list displays: source codename, last updated. It would be helpful to also see: total # of messages/docs.\n\nExtracted from #322\n\n", "before_files": [{"content": "import os\nimport datetime\n\nfrom sqlalchemy import create_engine, ForeignKey\nfrom sqlalchemy.orm import scoped_session, sessionmaker, relationship, backref\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Boolean, DateTime\nfrom sqlalchemy.orm.exc import NoResultFound\n\nimport config\nimport crypto_util\nimport store\n\n# http://flask.pocoo.org/docs/patterns/sqlalchemy/\n\nif config.DATABASE_ENGINE == \"sqlite\":\n engine = create_engine(\n config.DATABASE_ENGINE + \":///\" +\n config.DATABASE_FILE\n )\nelse:\n engine = create_engine(\n config.DATABASE_ENGINE + '://' +\n config.DATABASE_USERNAME + ':' +\n config.DATABASE_PASSWORD + '@' +\n config.DATABASE_HOST + '/' +\n config.DATABASE_NAME, echo=False\n )\n\ndb_session = scoped_session(sessionmaker(autocommit=False,\n autoflush=False,\n bind=engine))\nBase = declarative_base()\nBase.query = db_session.query_property()\n\n\nclass Source(Base):\n __tablename__ = 'sources'\n id = Column(Integer, primary_key=True)\n filesystem_id = Column(String(96), unique=True)\n journalist_designation = Column(String(255), nullable=False)\n flagged = Column(Boolean, default=False)\n last_updated = Column(DateTime, default=datetime.datetime.now)\n \n # sources are \"pending\" and don't get displayed to journalists until they submit something\n pending = Column(Boolean, default=True)\n\n # keep track of how many interactions have happened, for filenames\n interaction_count = Column(Integer, default=0, nullable=False)\n\n def __init__(self, filesystem_id=None, journalist_designation=None):\n self.filesystem_id = filesystem_id\n self.journalist_designation = journalist_designation\n\n def __repr__(self):\n return '<Source %r>' % (self.journalist_designation)\n\n def journalist_filename(self):\n valid_chars = 'abcdefghijklmnopqrstuvwxyz1234567890-_'\n return ''.join([c for c in self.journalist_designation.lower().replace(' ', '_') if c in valid_chars])\n\nclass Submission(Base):\n __tablename__ = 'submissions'\n id = Column(Integer, primary_key=True)\n source_id = Column(Integer, ForeignKey('sources.id'))\n source = relationship(\"Source\", backref=backref('submissions', order_by=id))\n filename = Column(String(255), nullable=False)\n size = Column(Integer, nullable=False)\n\n def __init__(self, source, filename):\n self.source_id = source.id\n self.filename = filename\n self.size = os.stat(store.path(source.filesystem_id, 
filename)).st_size\n\n def __repr__(self):\n return '<Submission %r>' % (self.filename)\n\n\n# Declare (or import) models before init_db\ndef init_db():\n Base.metadata.create_all(bind=engine)\n\n", "path": "securedrop/db.py"}], "after_files": [{"content": "import os\nimport datetime\n\nfrom sqlalchemy import create_engine, ForeignKey\nfrom sqlalchemy.orm import scoped_session, sessionmaker, relationship, backref\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Boolean, DateTime\nfrom sqlalchemy.orm.exc import NoResultFound\n\nimport config\nimport crypto_util\nimport store\n\n# http://flask.pocoo.org/docs/patterns/sqlalchemy/\n\nif config.DATABASE_ENGINE == \"sqlite\":\n engine = create_engine(\n config.DATABASE_ENGINE + \":///\" +\n config.DATABASE_FILE\n )\nelse:\n engine = create_engine(\n config.DATABASE_ENGINE + '://' +\n config.DATABASE_USERNAME + ':' +\n config.DATABASE_PASSWORD + '@' +\n config.DATABASE_HOST + '/' +\n config.DATABASE_NAME, echo=False\n )\n\ndb_session = scoped_session(sessionmaker(autocommit=False,\n autoflush=False,\n bind=engine))\nBase = declarative_base()\nBase.query = db_session.query_property()\n\n\nclass Source(Base):\n __tablename__ = 'sources'\n id = Column(Integer, primary_key=True)\n filesystem_id = Column(String(96), unique=True)\n journalist_designation = Column(String(255), nullable=False)\n flagged = Column(Boolean, default=False)\n last_updated = Column(DateTime, default=datetime.datetime.now)\n \n # sources are \"pending\" and don't get displayed to journalists until they submit something\n pending = Column(Boolean, default=True)\n\n # keep track of how many interactions have happened, for filenames\n interaction_count = Column(Integer, default=0, nullable=False)\n\n def __init__(self, filesystem_id=None, journalist_designation=None):\n self.filesystem_id = filesystem_id\n self.journalist_designation = journalist_designation\n\n def __repr__(self):\n return '<Source %r>' % (self.journalist_designation)\n\n def journalist_filename(self):\n valid_chars = 'abcdefghijklmnopqrstuvwxyz1234567890-_'\n return ''.join([c for c in self.journalist_designation.lower().replace(' ', '_') if c in valid_chars])\n\n def documents_messages_count(self):\n try:\n return self.docs_msgs_count\n except AttributeError:\n self.docs_msgs_count = {'messages': 0, 'documents': 0}\n for submission in self.submissions:\n if submission.filename.endswith('msg.gpg'):\n self.docs_msgs_count['messages'] += 1\n elif submission.filename.endswith('doc.zip.gpg'):\n self.docs_msgs_count['documents'] += 1\n return self.docs_msgs_count\n\n\nclass Submission(Base):\n __tablename__ = 'submissions'\n id = Column(Integer, primary_key=True)\n source_id = Column(Integer, ForeignKey('sources.id'))\n source = relationship(\"Source\", backref=backref('submissions', order_by=id))\n filename = Column(String(255), nullable=False)\n size = Column(Integer, nullable=False)\n\n def __init__(self, source, filename):\n self.source_id = source.id\n self.filename = filename\n self.size = os.stat(store.path(source.filesystem_id, filename)).st_size\n\n def __repr__(self):\n return '<Submission %r>' % (self.filename)\n\n\n# Declare (or import) models before init_db\ndef init_db():\n Base.metadata.create_all(bind=engine)\n\n", "path": "securedrop/db.py"}]}
1,097
227
gh_patches_debug_38619
rasdani/github-patches
git_diff
sktime__sktime-1600
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Refactor issue #1043 Fixes #1043 Removed methods load_UCR_UEA_dataset & _load_dataset from datasets/base.py and moved them to utils/data_io.py --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sktime/transformations/panel/signature_based/_signature_method.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 from sklearn.pipeline import Pipeline 3 from sktime.transformations.base import _PanelToTabularTransformer 4 from sktime.transformations.panel.signature_based._compute import ( 5 _WindowSignatureTransform, 6 ) 7 from sktime.transformations.panel.signature_based._augmentations import ( 8 _make_augmentation_pipeline, 9 ) 10 from sktime.transformations.panel.signature_based._checks import ( 11 _handle_sktime_signatures, 12 ) 13 14 15 class SignatureTransformer(_PanelToTabularTransformer): 16 """Transformation class from the signature method. 17 18 Follows the methodology laid out in the paper: 19 "A Generalised Signature Method for Multivariate Time Series" 20 21 Parameters 22 ---------- 23 augmentation_list: tuple of strings, contains the augmentations to be 24 applied before application of the signature transform. 25 window_name: str, The name of the window transform to apply. 26 window_depth: int, The depth of the dyadic window. (Active only if 27 `window_name == 'dyadic'`). 28 window_length: int, The length of the sliding/expanding window. (Active 29 only if `window_name in ['sliding, 'expanding']`. 30 window_step: int, The step of the sliding/expanding window. (Active 31 only if `window_name in ['sliding, 'expanding']`. 32 rescaling: str or None, The method of signature rescaling. 33 sig_tfm: str, String to specify the type of signature transform. One of: 34 ['signature', 'logsignature']). 35 depth: int, Signature truncation depth. 36 37 Attributes 38 ---------- 39 signature_method: sklearn.Pipeline, A sklearn pipeline object that contains 40 all the steps to extract the signature features. 41 """ 42 43 def __init__( 44 self, 45 augmentation_list=("basepoint", "addtime"), 46 window_name="dyadic", 47 window_depth=3, 48 window_length=None, 49 window_step=None, 50 rescaling=None, 51 sig_tfm="signature", 52 depth=4, 53 ): 54 super(SignatureTransformer, self).__init__() 55 self.augmentation_list = augmentation_list 56 self.window_name = window_name 57 self.window_depth = window_depth 58 self.window_length = window_length 59 self.window_step = window_step 60 self.rescaling = rescaling 61 self.sig_tfm = sig_tfm 62 self.depth = depth 63 64 self.setup_feature_pipeline() 65 66 def _assertions(self): 67 """Some assertions to run on initialisation.""" 68 assert not all( 69 [self.sig_tfm == "logsignature", self.rescaling == "post"] 70 ), "Cannot have post rescaling with the logsignature." 
71 72 def setup_feature_pipeline(self): 73 """Sets up the signature method as an sklearn pipeline.""" 74 augmentation_step = _make_augmentation_pipeline(self.augmentation_list) 75 transform_step = _WindowSignatureTransform( 76 window_name=self.window_name, 77 window_depth=self.window_depth, 78 window_length=self.window_length, 79 window_step=self.window_step, 80 sig_tfm=self.sig_tfm, 81 sig_depth=self.depth, 82 rescaling=self.rescaling, 83 ) 84 85 # The so-called 'signature method' as defined in the reference paper 86 self.signature_method = Pipeline( 87 [ 88 ("augmentations", augmentation_step), 89 ("window_and_transform", transform_step), 90 ] 91 ) 92 93 @_handle_sktime_signatures(check_fitted=False) 94 def fit(self, data, labels=None): 95 self.signature_method.fit(data, labels) 96 self._is_fitted = True 97 return self 98 99 @_handle_sktime_signatures(check_fitted=True) 100 def transform(self, data, labels=None): 101 return self.signature_method.transform(data) 102 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sktime/transformations/panel/signature_based/_signature_method.py b/sktime/transformations/panel/signature_based/_signature_method.py --- a/sktime/transformations/panel/signature_based/_signature_method.py +++ b/sktime/transformations/panel/signature_based/_signature_method.py @@ -1,15 +1,16 @@ # -*- coding: utf-8 -*- from sklearn.pipeline import Pipeline + from sktime.transformations.base import _PanelToTabularTransformer -from sktime.transformations.panel.signature_based._compute import ( - _WindowSignatureTransform, -) from sktime.transformations.panel.signature_based._augmentations import ( _make_augmentation_pipeline, ) from sktime.transformations.panel.signature_based._checks import ( _handle_sktime_signatures, ) +from sktime.transformations.panel.signature_based._compute import ( + _WindowSignatureTransform, +) class SignatureTransformer(_PanelToTabularTransformer): @@ -63,14 +64,8 @@ self.setup_feature_pipeline() - def _assertions(self): - """Some assertions to run on initialisation.""" - assert not all( - [self.sig_tfm == "logsignature", self.rescaling == "post"] - ), "Cannot have post rescaling with the logsignature." - def setup_feature_pipeline(self): - """Sets up the signature method as an sklearn pipeline.""" + """Set up the signature method as an sklearn pipeline.""" augmentation_step = _make_augmentation_pipeline(self.augmentation_list) transform_step = _WindowSignatureTransform( window_name=self.window_name, @@ -92,10 +87,38 @@ @_handle_sktime_signatures(check_fitted=False) def fit(self, data, labels=None): + """Fit to data, then transform it. + + Parameters + ---------- + data: pd.Dataframe or np.ndarray (3d array) + Data to transform. + labels: np.ndarray (1d array) or pd.series or list + Labels for the data. + + Returns + ------- + pd.Dataframe or np.ndarray or pd.series + Transformed data. + """ self.signature_method.fit(data, labels) self._is_fitted = True return self @_handle_sktime_signatures(check_fitted=True) def transform(self, data, labels=None): + """Transform the class from the signature method. + + Parameters + ---------- + data: pd.Dataframe or np.ndarray (3d array) + Data to transform. + labels: np.ndarray (1d array) or pd.series or list + Labels for the data. + + Returns + ------- + pd.Dataframe or np.ndarray or pd.series + Transformed data. + """ return self.signature_method.transform(data)
{"golden_diff": "diff --git a/sktime/transformations/panel/signature_based/_signature_method.py b/sktime/transformations/panel/signature_based/_signature_method.py\n--- a/sktime/transformations/panel/signature_based/_signature_method.py\n+++ b/sktime/transformations/panel/signature_based/_signature_method.py\n@@ -1,15 +1,16 @@\n # -*- coding: utf-8 -*-\n from sklearn.pipeline import Pipeline\n+\n from sktime.transformations.base import _PanelToTabularTransformer\n-from sktime.transformations.panel.signature_based._compute import (\n- _WindowSignatureTransform,\n-)\n from sktime.transformations.panel.signature_based._augmentations import (\n _make_augmentation_pipeline,\n )\n from sktime.transformations.panel.signature_based._checks import (\n _handle_sktime_signatures,\n )\n+from sktime.transformations.panel.signature_based._compute import (\n+ _WindowSignatureTransform,\n+)\n \n \n class SignatureTransformer(_PanelToTabularTransformer):\n@@ -63,14 +64,8 @@\n \n self.setup_feature_pipeline()\n \n- def _assertions(self):\n- \"\"\"Some assertions to run on initialisation.\"\"\"\n- assert not all(\n- [self.sig_tfm == \"logsignature\", self.rescaling == \"post\"]\n- ), \"Cannot have post rescaling with the logsignature.\"\n-\n def setup_feature_pipeline(self):\n- \"\"\"Sets up the signature method as an sklearn pipeline.\"\"\"\n+ \"\"\"Set up the signature method as an sklearn pipeline.\"\"\"\n augmentation_step = _make_augmentation_pipeline(self.augmentation_list)\n transform_step = _WindowSignatureTransform(\n window_name=self.window_name,\n@@ -92,10 +87,38 @@\n \n @_handle_sktime_signatures(check_fitted=False)\n def fit(self, data, labels=None):\n+ \"\"\"Fit to data, then transform it.\n+\n+ Parameters\n+ ----------\n+ data: pd.Dataframe or np.ndarray (3d array)\n+ Data to transform.\n+ labels: np.ndarray (1d array) or pd.series or list\n+ Labels for the data.\n+\n+ Returns\n+ -------\n+ pd.Dataframe or np.ndarray or pd.series\n+ Transformed data.\n+ \"\"\"\n self.signature_method.fit(data, labels)\n self._is_fitted = True\n return self\n \n @_handle_sktime_signatures(check_fitted=True)\n def transform(self, data, labels=None):\n+ \"\"\"Transform the class from the signature method.\n+\n+ Parameters\n+ ----------\n+ data: pd.Dataframe or np.ndarray (3d array)\n+ Data to transform.\n+ labels: np.ndarray (1d array) or pd.series or list\n+ Labels for the data.\n+\n+ Returns\n+ -------\n+ pd.Dataframe or np.ndarray or pd.series\n+ Transformed data.\n+ \"\"\"\n return self.signature_method.transform(data)\n", "issue": "Refactor issue #1043\nFixes #1043 \r\n\r\nRemoved methods load_UCR_UEA_dataset & _load_dataset from datasets/base.py and moved them to utils/data_io.py\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom sklearn.pipeline import Pipeline\nfrom sktime.transformations.base import _PanelToTabularTransformer\nfrom sktime.transformations.panel.signature_based._compute import (\n _WindowSignatureTransform,\n)\nfrom sktime.transformations.panel.signature_based._augmentations import (\n _make_augmentation_pipeline,\n)\nfrom sktime.transformations.panel.signature_based._checks import (\n _handle_sktime_signatures,\n)\n\n\nclass SignatureTransformer(_PanelToTabularTransformer):\n \"\"\"Transformation class from the signature method.\n\n Follows the methodology laid out in the paper:\n \"A Generalised Signature Method for Multivariate Time Series\"\n\n Parameters\n ----------\n augmentation_list: tuple of strings, contains the augmentations to be\n applied before application of 
the signature transform.\n window_name: str, The name of the window transform to apply.\n window_depth: int, The depth of the dyadic window. (Active only if\n `window_name == 'dyadic'`).\n window_length: int, The length of the sliding/expanding window. (Active\n only if `window_name in ['sliding, 'expanding']`.\n window_step: int, The step of the sliding/expanding window. (Active\n only if `window_name in ['sliding, 'expanding']`.\n rescaling: str or None, The method of signature rescaling.\n sig_tfm: str, String to specify the type of signature transform. One of:\n ['signature', 'logsignature']).\n depth: int, Signature truncation depth.\n\n Attributes\n ----------\n signature_method: sklearn.Pipeline, A sklearn pipeline object that contains\n all the steps to extract the signature features.\n \"\"\"\n\n def __init__(\n self,\n augmentation_list=(\"basepoint\", \"addtime\"),\n window_name=\"dyadic\",\n window_depth=3,\n window_length=None,\n window_step=None,\n rescaling=None,\n sig_tfm=\"signature\",\n depth=4,\n ):\n super(SignatureTransformer, self).__init__()\n self.augmentation_list = augmentation_list\n self.window_name = window_name\n self.window_depth = window_depth\n self.window_length = window_length\n self.window_step = window_step\n self.rescaling = rescaling\n self.sig_tfm = sig_tfm\n self.depth = depth\n\n self.setup_feature_pipeline()\n\n def _assertions(self):\n \"\"\"Some assertions to run on initialisation.\"\"\"\n assert not all(\n [self.sig_tfm == \"logsignature\", self.rescaling == \"post\"]\n ), \"Cannot have post rescaling with the logsignature.\"\n\n def setup_feature_pipeline(self):\n \"\"\"Sets up the signature method as an sklearn pipeline.\"\"\"\n augmentation_step = _make_augmentation_pipeline(self.augmentation_list)\n transform_step = _WindowSignatureTransform(\n window_name=self.window_name,\n window_depth=self.window_depth,\n window_length=self.window_length,\n window_step=self.window_step,\n sig_tfm=self.sig_tfm,\n sig_depth=self.depth,\n rescaling=self.rescaling,\n )\n\n # The so-called 'signature method' as defined in the reference paper\n self.signature_method = Pipeline(\n [\n (\"augmentations\", augmentation_step),\n (\"window_and_transform\", transform_step),\n ]\n )\n\n @_handle_sktime_signatures(check_fitted=False)\n def fit(self, data, labels=None):\n self.signature_method.fit(data, labels)\n self._is_fitted = True\n return self\n\n @_handle_sktime_signatures(check_fitted=True)\n def transform(self, data, labels=None):\n return self.signature_method.transform(data)\n", "path": "sktime/transformations/panel/signature_based/_signature_method.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom sklearn.pipeline import Pipeline\n\nfrom sktime.transformations.base import _PanelToTabularTransformer\nfrom sktime.transformations.panel.signature_based._augmentations import (\n _make_augmentation_pipeline,\n)\nfrom sktime.transformations.panel.signature_based._checks import (\n _handle_sktime_signatures,\n)\nfrom sktime.transformations.panel.signature_based._compute import (\n _WindowSignatureTransform,\n)\n\n\nclass SignatureTransformer(_PanelToTabularTransformer):\n \"\"\"Transformation class from the signature method.\n\n Follows the methodology laid out in the paper:\n \"A Generalised Signature Method for Multivariate Time Series\"\n\n Parameters\n ----------\n augmentation_list: tuple of strings, contains the augmentations to be\n applied before application of the signature transform.\n window_name: str, The name of the window transform to 
apply.\n window_depth: int, The depth of the dyadic window. (Active only if\n `window_name == 'dyadic'`).\n window_length: int, The length of the sliding/expanding window. (Active\n only if `window_name in ['sliding, 'expanding']`.\n window_step: int, The step of the sliding/expanding window. (Active\n only if `window_name in ['sliding, 'expanding']`.\n rescaling: str or None, The method of signature rescaling.\n sig_tfm: str, String to specify the type of signature transform. One of:\n ['signature', 'logsignature']).\n depth: int, Signature truncation depth.\n\n Attributes\n ----------\n signature_method: sklearn.Pipeline, A sklearn pipeline object that contains\n all the steps to extract the signature features.\n \"\"\"\n\n def __init__(\n self,\n augmentation_list=(\"basepoint\", \"addtime\"),\n window_name=\"dyadic\",\n window_depth=3,\n window_length=None,\n window_step=None,\n rescaling=None,\n sig_tfm=\"signature\",\n depth=4,\n ):\n super(SignatureTransformer, self).__init__()\n self.augmentation_list = augmentation_list\n self.window_name = window_name\n self.window_depth = window_depth\n self.window_length = window_length\n self.window_step = window_step\n self.rescaling = rescaling\n self.sig_tfm = sig_tfm\n self.depth = depth\n\n self.setup_feature_pipeline()\n\n def setup_feature_pipeline(self):\n \"\"\"Set up the signature method as an sklearn pipeline.\"\"\"\n augmentation_step = _make_augmentation_pipeline(self.augmentation_list)\n transform_step = _WindowSignatureTransform(\n window_name=self.window_name,\n window_depth=self.window_depth,\n window_length=self.window_length,\n window_step=self.window_step,\n sig_tfm=self.sig_tfm,\n sig_depth=self.depth,\n rescaling=self.rescaling,\n )\n\n # The so-called 'signature method' as defined in the reference paper\n self.signature_method = Pipeline(\n [\n (\"augmentations\", augmentation_step),\n (\"window_and_transform\", transform_step),\n ]\n )\n\n @_handle_sktime_signatures(check_fitted=False)\n def fit(self, data, labels=None):\n \"\"\"Fit to data, then transform it.\n\n Parameters\n ----------\n data: pd.Dataframe or np.ndarray (3d array)\n Data to transform.\n labels: np.ndarray (1d array) or pd.series or list\n Labels for the data.\n\n Returns\n -------\n pd.Dataframe or np.ndarray or pd.series\n Transformed data.\n \"\"\"\n self.signature_method.fit(data, labels)\n self._is_fitted = True\n return self\n\n @_handle_sktime_signatures(check_fitted=True)\n def transform(self, data, labels=None):\n \"\"\"Transform the class from the signature method.\n\n Parameters\n ----------\n data: pd.Dataframe or np.ndarray (3d array)\n Data to transform.\n labels: np.ndarray (1d array) or pd.series or list\n Labels for the data.\n\n Returns\n -------\n pd.Dataframe or np.ndarray or pd.series\n Transformed data.\n \"\"\"\n return self.signature_method.transform(data)\n", "path": "sktime/transformations/panel/signature_based/_signature_method.py"}]}
1,294
629
gh_patches_debug_56181
rasdani/github-patches
git_diff
TOMToolkit__tom_base-196
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Missing dataclasses Following the tom_base install instructions, I pip installed the requirements.txt and then tried > ./manage.py migrate which ended with the following error: File "<frozen importlib._bootstrap_external>", line 678, in exec_module File "<frozen importlib._bootstrap>", line 205, in _call_with_frames_removed File "/Users/rstreet/software/tom_base/tom_alerts/urls.py", line 3, in <module> from tom_alerts.views import BrokerQueryCreateView, BrokerQueryListView, BrokerQueryUpdateView, RunQueryView File "/Users/rstreet/software/tom_base/tom_alerts/views.py", line 3, in <module> from tom_alerts.alerts import get_service_class, get_service_classes File "/Users/rstreet/software/tom_base/tom_alerts/alerts.py", line 5, in <module> from dataclasses import dataclass ModuleNotFoundError: No module named 'dataclasses' --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from setuptools import setup, find_packages 2 from os import path 3 4 here = path.abspath(path.dirname(__file__)) 5 with open(path.join(here, 'README.md'), encoding='utf-8') as f: 6 long_description = f.read() 7 8 setup( 9 name='tomtoolkit', 10 version='1.1.0', 11 description='The TOM Toolkit and base modules', 12 long_description=long_description, 13 long_description_content_type='text/markdown', 14 url='https://tomtoolkit.github.io', 15 author='TOM Toolkit Project', 16 author_email='ariba@lco.global', 17 classifiers=[ 18 'Development Status :: 3 - Alpha', 19 'Intended Audience :: Science/Research', 20 'License :: OSI Approved :: BSD License', 21 'Operating System :: OS Independent', 22 'Programming Language :: Python :: 3', 23 'Programming Language :: Python :: 3.7', 24 'Topic :: Scientific/Engineering :: Astronomy', 25 'Topic :: Scientific/Engineering :: Physics' 26 ], 27 keywords=['tomtoolkit', 'astronomy', 'astrophysics', 'cosmology', 'science', 'fits', 'observatory'], 28 packages=find_packages(), 29 install_requires=[ 30 'django', 31 'django-bootstrap4', 32 'django-extensions', 33 'django-filter', 34 'django-contrib-comments', 35 'django-gravatar2', 36 'django-crispy-forms', 37 'django-guardian', 38 'numpy', 39 'python-dateutil', 40 'requests', 41 'astroquery', 42 'astropy', 43 'astroplan', 44 'plotly', 45 'matplotlib', 46 'pillow', 47 'fits2image', 48 'specutils', 49 ], 50 extras_require={ 51 'test': ['factory_boy'] 52 }, 53 include_package_data=True, 54 ) 55 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -46,6 +46,7 @@ 'pillow', 'fits2image', 'specutils', + "dataclasses; python_version < '3.7'", ], extras_require={ 'test': ['factory_boy']
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -46,6 +46,7 @@\n 'pillow',\n 'fits2image',\n 'specutils',\n+ \"dataclasses; python_version < '3.7'\",\n ],\n extras_require={\n 'test': ['factory_boy']\n", "issue": "Missing dataclasses\nFollowing the tom_base install instructions, I pip installed the requirements.txt and then tried \r\n> ./manage.py migrate\r\n\r\nwhich ended with the following error:\r\n File \"<frozen importlib._bootstrap_external>\", line 678, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 205, in _call_with_frames_removed\r\n File \"/Users/rstreet/software/tom_base/tom_alerts/urls.py\", line 3, in <module>\r\n from tom_alerts.views import BrokerQueryCreateView, BrokerQueryListView, BrokerQueryUpdateView, RunQueryView\r\n File \"/Users/rstreet/software/tom_base/tom_alerts/views.py\", line 3, in <module>\r\n from tom_alerts.alerts import get_service_class, get_service_classes\r\n File \"/Users/rstreet/software/tom_base/tom_alerts/alerts.py\", line 5, in <module>\r\n from dataclasses import dataclass\r\nModuleNotFoundError: No module named 'dataclasses'\r\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='tomtoolkit',\n version='1.1.0',\n description='The TOM Toolkit and base modules',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://tomtoolkit.github.io',\n author='TOM Toolkit Project',\n author_email='ariba@lco.global',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Topic :: Scientific/Engineering :: Physics'\n ],\n keywords=['tomtoolkit', 'astronomy', 'astrophysics', 'cosmology', 'science', 'fits', 'observatory'],\n packages=find_packages(),\n install_requires=[\n 'django',\n 'django-bootstrap4',\n 'django-extensions',\n 'django-filter',\n 'django-contrib-comments',\n 'django-gravatar2',\n 'django-crispy-forms',\n 'django-guardian',\n 'numpy',\n 'python-dateutil',\n 'requests',\n 'astroquery',\n 'astropy',\n 'astroplan',\n 'plotly',\n 'matplotlib',\n 'pillow',\n 'fits2image',\n 'specutils',\n ],\n extras_require={\n 'test': ['factory_boy']\n },\n include_package_data=True,\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='tomtoolkit',\n version='1.1.0',\n description='The TOM Toolkit and base modules',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://tomtoolkit.github.io',\n author='TOM Toolkit Project',\n author_email='ariba@lco.global',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Topic :: Scientific/Engineering :: Physics'\n ],\n 
keywords=['tomtoolkit', 'astronomy', 'astrophysics', 'cosmology', 'science', 'fits', 'observatory'],\n packages=find_packages(),\n install_requires=[\n 'django',\n 'django-bootstrap4',\n 'django-extensions',\n 'django-filter',\n 'django-contrib-comments',\n 'django-gravatar2',\n 'django-crispy-forms',\n 'django-guardian',\n 'numpy',\n 'python-dateutil',\n 'requests',\n 'astroquery',\n 'astropy',\n 'astroplan',\n 'plotly',\n 'matplotlib',\n 'pillow',\n 'fits2image',\n 'specutils',\n \"dataclasses; python_version < '3.7'\",\n ],\n extras_require={\n 'test': ['factory_boy']\n },\n include_package_data=True,\n)\n", "path": "setup.py"}]}
951
77
gh_patches_debug_29137
rasdani/github-patches
git_diff
spack__spack-4584
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- building flex with +lex variant fails Using an older system (suse 13 with python 2.7.6) and the symlink code in the package fails entirely. @mjwoods --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `var/spack/repos/builtin/packages/flex/package.py` Content: ``` 1 ############################################################################## 2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. 3 # Produced at the Lawrence Livermore National Laboratory. 4 # 5 # This file is part of Spack. 6 # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. 7 # LLNL-CODE-647188 8 # 9 # For details, see https://github.com/llnl/spack 10 # Please also see the LICENSE file for our notice and the LGPL. 11 # 12 # This program is free software; you can redistribute it and/or modify 13 # it under the terms of the GNU Lesser General Public License (as 14 # published by the Free Software Foundation) version 2.1, February 1999. 15 # 16 # This program is distributed in the hope that it will be useful, but 17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF 18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and 19 # conditions of the GNU Lesser General Public License for more details. 20 # 21 # You should have received a copy of the GNU Lesser General Public 22 # License along with this program; if not, write to the Free Software 23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 24 ############################################################################## 25 from spack import * 26 import os 27 28 29 class Flex(AutotoolsPackage): 30 """Flex is a tool for generating scanners.""" 31 32 homepage = "https://github.com/westes/flex" 33 url = "https://github.com/westes/flex/releases/download/v2.6.1/flex-2.6.1.tar.gz" 34 35 version('2.6.3', 'a5f65570cd9107ec8a8ec88f17b31bb1') 36 # Problematic version: 37 # See issue #2554; https://github.com/westes/flex/issues/113 38 # version('2.6.2', 'cc6d76c333db7653d5caf423a3335239') 39 version('2.6.1', '05bcd8fb629e0ae130311e8a6106fa82') 40 version('2.6.0', '760be2ee9433e822b6eb65318311c19d') 41 version('2.5.39', '5865e76ac69c05699f476515592750d7') 42 43 variant('lex', default=True, 44 description="Provide symlinks for lex and libl") 45 46 depends_on('bison', type='build') 47 depends_on('gettext@0.19:', type='build') 48 depends_on('help2man', type='build') 49 50 # Older tarballs don't come with a configure script 51 depends_on('m4', type='build') 52 depends_on('autoconf', type='build', when='@:2.6.0') 53 depends_on('automake', type='build', when='@:2.6.0') 54 depends_on('libtool', type='build', when='@:2.6.0') 55 56 def url_for_version(self, version): 57 url = "https://github.com/westes/flex" 58 if version >= Version('2.6.1'): 59 url += "/releases/download/v{0}/flex-{0}.tar.gz".format(version) 60 elif version == Version('2.6.0'): 61 url += "/archive/v{0}.tar.gz".format(version) 62 elif version >= Version('2.5.37'): 63 url += "/archive/flex-{0}.tar.gz".format(version) 64 else: 65 url += "/archive/flex-{0}.tar.gz".format(version.dashed) 66 67 return url 68 69 @run_after('install') 70 def symlink_lex(self): 71 if self.spec.satisfies('+lex'): 72 dso = dso_suffix 73 for dir, flex, lex in \ 74 ((self.prefix.bin, 'flex', 'lex'), 75 (self.prefix.lib, 'libfl.a', 'libl.a'), 76 (self.prefix.lib, 
'libfl.' + dso, 'libl.' + dso)): 77 with working_dir(dir): 78 if (os.path.isfile(flex) and not 79 os.path.lexists(lex)): 80 symlink(flex, lex) 81 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/var/spack/repos/builtin/packages/flex/package.py b/var/spack/repos/builtin/packages/flex/package.py --- a/var/spack/repos/builtin/packages/flex/package.py +++ b/var/spack/repos/builtin/packages/flex/package.py @@ -32,6 +32,7 @@ homepage = "https://github.com/westes/flex" url = "https://github.com/westes/flex/releases/download/v2.6.1/flex-2.6.1.tar.gz" + version('2.6.4', '2882e3179748cc9f9c23ec593d6adc8d') version('2.6.3', 'a5f65570cd9107ec8a8ec88f17b31bb1') # Problematic version: # See issue #2554; https://github.com/westes/flex/issues/113 @@ -68,13 +69,17 @@ @run_after('install') def symlink_lex(self): + """Install symlinks for lex compatibility.""" if self.spec.satisfies('+lex'): dso = dso_suffix for dir, flex, lex in \ - ((self.prefix.bin, 'flex', 'lex'), - (self.prefix.lib, 'libfl.a', 'libl.a'), - (self.prefix.lib, 'libfl.' + dso, 'libl.' + dso)): - with working_dir(dir): - if (os.path.isfile(flex) and not - os.path.lexists(lex)): - symlink(flex, lex) + ((self.prefix.bin, 'flex', 'lex'), + (self.prefix.lib, 'libfl.a', 'libl.a'), + (self.prefix.lib, 'libfl.' + dso, 'libl.' + dso), + (self.prefix.lib64, 'libfl.a', 'libl.a'), + (self.prefix.lib64, 'libfl.' + dso, 'libl.' + dso)): + + if os.path.isdir(dir): + with working_dir(dir): + if (os.path.isfile(flex) and not os.path.lexists(lex)): + symlink(flex, lex)
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/flex/package.py b/var/spack/repos/builtin/packages/flex/package.py\n--- a/var/spack/repos/builtin/packages/flex/package.py\n+++ b/var/spack/repos/builtin/packages/flex/package.py\n@@ -32,6 +32,7 @@\n homepage = \"https://github.com/westes/flex\"\n url = \"https://github.com/westes/flex/releases/download/v2.6.1/flex-2.6.1.tar.gz\"\n \n+ version('2.6.4', '2882e3179748cc9f9c23ec593d6adc8d')\n version('2.6.3', 'a5f65570cd9107ec8a8ec88f17b31bb1')\n # Problematic version:\n # See issue #2554; https://github.com/westes/flex/issues/113\n@@ -68,13 +69,17 @@\n \n @run_after('install')\n def symlink_lex(self):\n+ \"\"\"Install symlinks for lex compatibility.\"\"\"\n if self.spec.satisfies('+lex'):\n dso = dso_suffix\n for dir, flex, lex in \\\n- ((self.prefix.bin, 'flex', 'lex'),\n- (self.prefix.lib, 'libfl.a', 'libl.a'),\n- (self.prefix.lib, 'libfl.' + dso, 'libl.' + dso)):\n- with working_dir(dir):\n- if (os.path.isfile(flex) and not\n- os.path.lexists(lex)):\n- symlink(flex, lex)\n+ ((self.prefix.bin, 'flex', 'lex'),\n+ (self.prefix.lib, 'libfl.a', 'libl.a'),\n+ (self.prefix.lib, 'libfl.' + dso, 'libl.' + dso),\n+ (self.prefix.lib64, 'libfl.a', 'libl.a'),\n+ (self.prefix.lib64, 'libfl.' + dso, 'libl.' + dso)):\n+\n+ if os.path.isdir(dir):\n+ with working_dir(dir):\n+ if (os.path.isfile(flex) and not os.path.lexists(lex)):\n+ symlink(flex, lex)\n", "issue": "building flex with +lex variant fails\nUsing an older system (suse 13 with python 2.7.6) and the symlink code in the package fails entirely.\r\n@mjwoods \r\n\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\nimport os\n\n\nclass Flex(AutotoolsPackage):\n \"\"\"Flex is a tool for generating scanners.\"\"\"\n\n homepage = \"https://github.com/westes/flex\"\n url = \"https://github.com/westes/flex/releases/download/v2.6.1/flex-2.6.1.tar.gz\"\n\n version('2.6.3', 'a5f65570cd9107ec8a8ec88f17b31bb1')\n # Problematic version:\n # See issue #2554; https://github.com/westes/flex/issues/113\n # version('2.6.2', 'cc6d76c333db7653d5caf423a3335239')\n version('2.6.1', '05bcd8fb629e0ae130311e8a6106fa82')\n version('2.6.0', '760be2ee9433e822b6eb65318311c19d')\n version('2.5.39', '5865e76ac69c05699f476515592750d7')\n\n variant('lex', default=True,\n description=\"Provide symlinks for lex and libl\")\n\n depends_on('bison', type='build')\n depends_on('gettext@0.19:', type='build')\n depends_on('help2man', type='build')\n\n # Older tarballs don't come with a configure script\n depends_on('m4', type='build')\n depends_on('autoconf', type='build', when='@:2.6.0')\n depends_on('automake', type='build', when='@:2.6.0')\n depends_on('libtool', type='build', when='@:2.6.0')\n\n def url_for_version(self, version):\n url = \"https://github.com/westes/flex\"\n if version >= Version('2.6.1'):\n url += \"/releases/download/v{0}/flex-{0}.tar.gz\".format(version)\n elif version == Version('2.6.0'):\n url += \"/archive/v{0}.tar.gz\".format(version)\n elif version >= Version('2.5.37'):\n url += \"/archive/flex-{0}.tar.gz\".format(version)\n else:\n url += \"/archive/flex-{0}.tar.gz\".format(version.dashed)\n\n return url\n\n @run_after('install')\n def symlink_lex(self):\n if self.spec.satisfies('+lex'):\n dso = dso_suffix\n for dir, flex, lex in \\\n ((self.prefix.bin, 'flex', 'lex'),\n (self.prefix.lib, 'libfl.a', 'libl.a'),\n (self.prefix.lib, 'libfl.' + dso, 'libl.' + dso)):\n with working_dir(dir):\n if (os.path.isfile(flex) and not\n os.path.lexists(lex)):\n symlink(flex, lex)\n", "path": "var/spack/repos/builtin/packages/flex/package.py"}], "after_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\nimport os\n\n\nclass Flex(AutotoolsPackage):\n \"\"\"Flex is a tool for generating scanners.\"\"\"\n\n homepage = \"https://github.com/westes/flex\"\n url = \"https://github.com/westes/flex/releases/download/v2.6.1/flex-2.6.1.tar.gz\"\n\n version('2.6.4', '2882e3179748cc9f9c23ec593d6adc8d')\n version('2.6.3', 'a5f65570cd9107ec8a8ec88f17b31bb1')\n # Problematic version:\n # See issue #2554; https://github.com/westes/flex/issues/113\n # version('2.6.2', 'cc6d76c333db7653d5caf423a3335239')\n version('2.6.1', '05bcd8fb629e0ae130311e8a6106fa82')\n version('2.6.0', '760be2ee9433e822b6eb65318311c19d')\n version('2.5.39', '5865e76ac69c05699f476515592750d7')\n\n variant('lex', default=True,\n description=\"Provide symlinks for lex and libl\")\n\n depends_on('bison', type='build')\n depends_on('gettext@0.19:', type='build')\n depends_on('help2man', type='build')\n\n # Older tarballs don't come with a configure script\n depends_on('m4', type='build')\n depends_on('autoconf', type='build', when='@:2.6.0')\n depends_on('automake', type='build', when='@:2.6.0')\n depends_on('libtool', type='build', when='@:2.6.0')\n\n def url_for_version(self, version):\n url = \"https://github.com/westes/flex\"\n if version >= Version('2.6.1'):\n url += \"/releases/download/v{0}/flex-{0}.tar.gz\".format(version)\n elif version == Version('2.6.0'):\n url += \"/archive/v{0}.tar.gz\".format(version)\n elif version >= Version('2.5.37'):\n url += \"/archive/flex-{0}.tar.gz\".format(version)\n else:\n url += \"/archive/flex-{0}.tar.gz\".format(version.dashed)\n\n return url\n\n @run_after('install')\n def symlink_lex(self):\n \"\"\"Install symlinks for lex compatibility.\"\"\"\n if self.spec.satisfies('+lex'):\n dso = dso_suffix\n for dir, flex, lex in \\\n ((self.prefix.bin, 'flex', 'lex'),\n (self.prefix.lib, 'libfl.a', 'libl.a'),\n (self.prefix.lib, 'libfl.' + dso, 'libl.' + dso),\n (self.prefix.lib64, 'libfl.a', 'libl.a'),\n (self.prefix.lib64, 'libfl.' + dso, 'libl.' + dso)):\n\n if os.path.isdir(dir):\n with working_dir(dir):\n if (os.path.isfile(flex) and not os.path.lexists(lex)):\n symlink(flex, lex)\n", "path": "var/spack/repos/builtin/packages/flex/package.py"}]}
1468
524
gh_patches_debug_15053
rasdani/github-patches
git_diff
deis__deis-4373
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Better error message when registration is disabled When `/deis/controller/registrationMode` is `disabled`, attempt to register returns ``` Registration failed: {"detail":"Authentication credentials were not provided."} ``` This message is misleading. It should explicitly say that registration is disabled. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `controller/api/permissions.py` Content: ``` 1 from rest_framework import permissions 2 from django.conf import settings 3 from django.contrib.auth.models import AnonymousUser 4 5 from api import models 6 7 8 def is_app_user(request, obj): 9 if request.user.is_superuser or \ 10 isinstance(obj, models.App) and obj.owner == request.user or \ 11 hasattr(obj, 'app') and obj.app.owner == request.user: 12 return True 13 elif request.user.has_perm('use_app', obj) or \ 14 hasattr(obj, 'app') and request.user.has_perm('use_app', obj.app): 15 return request.method != 'DELETE' 16 else: 17 return False 18 19 20 class IsAnonymous(permissions.BasePermission): 21 """ 22 View permission to allow anonymous users. 23 """ 24 25 def has_permission(self, request, view): 26 """ 27 Return `True` if permission is granted, `False` otherwise. 28 """ 29 return type(request.user) is AnonymousUser 30 31 32 class IsOwner(permissions.BasePermission): 33 """ 34 Object-level permission to allow only owners of an object to access it. 35 Assumes the model instance has an `owner` attribute. 36 """ 37 38 def has_object_permission(self, request, view, obj): 39 if hasattr(obj, 'owner'): 40 return obj.owner == request.user 41 else: 42 return False 43 44 45 class IsOwnerOrAdmin(permissions.BasePermission): 46 """ 47 Object-level permission to allow only owners of an object or administrators to access it. 48 Assumes the model instance has an `owner` attribute. 49 """ 50 def has_object_permission(self, request, view, obj): 51 if request.user.is_superuser: 52 return True 53 if hasattr(obj, 'owner'): 54 return obj.owner == request.user 55 else: 56 return False 57 58 59 class IsAppUser(permissions.BasePermission): 60 """ 61 Object-level permission to allow owners or collaborators to access 62 an app-related model. 63 """ 64 def has_object_permission(self, request, view, obj): 65 return is_app_user(request, obj) 66 67 68 class IsAdmin(permissions.BasePermission): 69 """ 70 View permission to allow only admins. 71 """ 72 73 def has_permission(self, request, view): 74 """ 75 Return `True` if permission is granted, `False` otherwise. 76 """ 77 return request.user.is_superuser 78 79 80 class IsAdminOrSafeMethod(permissions.BasePermission): 81 """ 82 View permission to allow only admins to use unsafe methods 83 including POST, PUT, DELETE. 84 85 This allows 86 """ 87 88 def has_permission(self, request, view): 89 """ 90 Return `True` if permission is granted, `False` otherwise. 91 """ 92 return request.method in permissions.SAFE_METHODS or request.user.is_superuser 93 94 95 class HasRegistrationAuth(permissions.BasePermission): 96 """ 97 Checks to see if registration is enabled 98 """ 99 def has_permission(self, request, view): 100 """ 101 If settings.REGISTRATION_MODE does not exist, such as during a test, return True 102 Return `True` if permission is granted, `False` otherwise. 
103 """ 104 try: 105 if settings.REGISTRATION_MODE == 'disabled': 106 return False 107 if settings.REGISTRATION_MODE == 'enabled': 108 return True 109 elif settings.REGISTRATION_MODE == 'admin_only': 110 return request.user.is_superuser 111 else: 112 raise Exception("{} is not a valid registation mode" 113 .format(settings.REGISTRATION_MODE)) 114 except AttributeError: 115 return True 116 117 118 class HasBuilderAuth(permissions.BasePermission): 119 """ 120 View permission to allow builder to perform actions 121 with a special HTTP header 122 """ 123 124 def has_permission(self, request, view): 125 """ 126 Return `True` if permission is granted, `False` otherwise. 127 """ 128 auth_header = request.environ.get('HTTP_X_DEIS_BUILDER_AUTH') 129 if not auth_header: 130 return False 131 return auth_header == settings.BUILDER_KEY 132 133 134 class CanRegenerateToken(permissions.BasePermission): 135 """ 136 Checks if a user can regenerate a token 137 """ 138 139 def has_permission(self, request, view): 140 """ 141 Return `True` if permission is granted, `False` otherwise. 142 """ 143 if 'username' in request.data or 'all' in request.data: 144 return request.user.is_superuser 145 else: 146 return True 147 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/controller/api/permissions.py b/controller/api/permissions.py --- a/controller/api/permissions.py +++ b/controller/api/permissions.py @@ -1,3 +1,5 @@ + +from rest_framework import exceptions from rest_framework import permissions from django.conf import settings from django.contrib.auth.models import AnonymousUser @@ -103,7 +105,7 @@ """ try: if settings.REGISTRATION_MODE == 'disabled': - return False + raise exceptions.PermissionDenied('Registration is disabled') if settings.REGISTRATION_MODE == 'enabled': return True elif settings.REGISTRATION_MODE == 'admin_only':
{"golden_diff": "diff --git a/controller/api/permissions.py b/controller/api/permissions.py\n--- a/controller/api/permissions.py\n+++ b/controller/api/permissions.py\n@@ -1,3 +1,5 @@\n+\n+from rest_framework import exceptions\n from rest_framework import permissions\n from django.conf import settings\n from django.contrib.auth.models import AnonymousUser\n@@ -103,7 +105,7 @@\n \"\"\"\n try:\n if settings.REGISTRATION_MODE == 'disabled':\n- return False\n+ raise exceptions.PermissionDenied('Registration is disabled')\n if settings.REGISTRATION_MODE == 'enabled':\n return True\n elif settings.REGISTRATION_MODE == 'admin_only':\n", "issue": "Better error message when registration is disabled\nWhen `/deis/controller/registrationMode` is `disabled`, attempt to register returns\n\n```\nRegistration failed: {\"detail\":\"Authentication credentials were not provided.\"}\n```\n\nThis message is misleading. It should explicitly say that registration is disabled.\n\n", "before_files": [{"content": "from rest_framework import permissions\nfrom django.conf import settings\nfrom django.contrib.auth.models import AnonymousUser\n\nfrom api import models\n\n\ndef is_app_user(request, obj):\n if request.user.is_superuser or \\\n isinstance(obj, models.App) and obj.owner == request.user or \\\n hasattr(obj, 'app') and obj.app.owner == request.user:\n return True\n elif request.user.has_perm('use_app', obj) or \\\n hasattr(obj, 'app') and request.user.has_perm('use_app', obj.app):\n return request.method != 'DELETE'\n else:\n return False\n\n\nclass IsAnonymous(permissions.BasePermission):\n \"\"\"\n View permission to allow anonymous users.\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"\n Return `True` if permission is granted, `False` otherwise.\n \"\"\"\n return type(request.user) is AnonymousUser\n\n\nclass IsOwner(permissions.BasePermission):\n \"\"\"\n Object-level permission to allow only owners of an object to access it.\n Assumes the model instance has an `owner` attribute.\n \"\"\"\n\n def has_object_permission(self, request, view, obj):\n if hasattr(obj, 'owner'):\n return obj.owner == request.user\n else:\n return False\n\n\nclass IsOwnerOrAdmin(permissions.BasePermission):\n \"\"\"\n Object-level permission to allow only owners of an object or administrators to access it.\n Assumes the model instance has an `owner` attribute.\n \"\"\"\n def has_object_permission(self, request, view, obj):\n if request.user.is_superuser:\n return True\n if hasattr(obj, 'owner'):\n return obj.owner == request.user\n else:\n return False\n\n\nclass IsAppUser(permissions.BasePermission):\n \"\"\"\n Object-level permission to allow owners or collaborators to access\n an app-related model.\n \"\"\"\n def has_object_permission(self, request, view, obj):\n return is_app_user(request, obj)\n\n\nclass IsAdmin(permissions.BasePermission):\n \"\"\"\n View permission to allow only admins.\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"\n Return `True` if permission is granted, `False` otherwise.\n \"\"\"\n return request.user.is_superuser\n\n\nclass IsAdminOrSafeMethod(permissions.BasePermission):\n \"\"\"\n View permission to allow only admins to use unsafe methods\n including POST, PUT, DELETE.\n\n This allows\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"\n Return `True` if permission is granted, `False` otherwise.\n \"\"\"\n return request.method in permissions.SAFE_METHODS or request.user.is_superuser\n\n\nclass HasRegistrationAuth(permissions.BasePermission):\n \"\"\"\n 
Checks to see if registration is enabled\n \"\"\"\n def has_permission(self, request, view):\n \"\"\"\n If settings.REGISTRATION_MODE does not exist, such as during a test, return True\n Return `True` if permission is granted, `False` otherwise.\n \"\"\"\n try:\n if settings.REGISTRATION_MODE == 'disabled':\n return False\n if settings.REGISTRATION_MODE == 'enabled':\n return True\n elif settings.REGISTRATION_MODE == 'admin_only':\n return request.user.is_superuser\n else:\n raise Exception(\"{} is not a valid registation mode\"\n .format(settings.REGISTRATION_MODE))\n except AttributeError:\n return True\n\n\nclass HasBuilderAuth(permissions.BasePermission):\n \"\"\"\n View permission to allow builder to perform actions\n with a special HTTP header\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"\n Return `True` if permission is granted, `False` otherwise.\n \"\"\"\n auth_header = request.environ.get('HTTP_X_DEIS_BUILDER_AUTH')\n if not auth_header:\n return False\n return auth_header == settings.BUILDER_KEY\n\n\nclass CanRegenerateToken(permissions.BasePermission):\n \"\"\"\n Checks if a user can regenerate a token\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"\n Return `True` if permission is granted, `False` otherwise.\n \"\"\"\n if 'username' in request.data or 'all' in request.data:\n return request.user.is_superuser\n else:\n return True\n", "path": "controller/api/permissions.py"}], "after_files": [{"content": "\nfrom rest_framework import exceptions\nfrom rest_framework import permissions\nfrom django.conf import settings\nfrom django.contrib.auth.models import AnonymousUser\n\nfrom api import models\n\n\ndef is_app_user(request, obj):\n if request.user.is_superuser or \\\n isinstance(obj, models.App) and obj.owner == request.user or \\\n hasattr(obj, 'app') and obj.app.owner == request.user:\n return True\n elif request.user.has_perm('use_app', obj) or \\\n hasattr(obj, 'app') and request.user.has_perm('use_app', obj.app):\n return request.method != 'DELETE'\n else:\n return False\n\n\nclass IsAnonymous(permissions.BasePermission):\n \"\"\"\n View permission to allow anonymous users.\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"\n Return `True` if permission is granted, `False` otherwise.\n \"\"\"\n return type(request.user) is AnonymousUser\n\n\nclass IsOwner(permissions.BasePermission):\n \"\"\"\n Object-level permission to allow only owners of an object to access it.\n Assumes the model instance has an `owner` attribute.\n \"\"\"\n\n def has_object_permission(self, request, view, obj):\n if hasattr(obj, 'owner'):\n return obj.owner == request.user\n else:\n return False\n\n\nclass IsOwnerOrAdmin(permissions.BasePermission):\n \"\"\"\n Object-level permission to allow only owners of an object or administrators to access it.\n Assumes the model instance has an `owner` attribute.\n \"\"\"\n def has_object_permission(self, request, view, obj):\n if request.user.is_superuser:\n return True\n if hasattr(obj, 'owner'):\n return obj.owner == request.user\n else:\n return False\n\n\nclass IsAppUser(permissions.BasePermission):\n \"\"\"\n Object-level permission to allow owners or collaborators to access\n an app-related model.\n \"\"\"\n def has_object_permission(self, request, view, obj):\n return is_app_user(request, obj)\n\n\nclass IsAdmin(permissions.BasePermission):\n \"\"\"\n View permission to allow only admins.\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"\n Return `True` if permission is granted, `False` 
otherwise.\n \"\"\"\n return request.user.is_superuser\n\n\nclass IsAdminOrSafeMethod(permissions.BasePermission):\n \"\"\"\n View permission to allow only admins to use unsafe methods\n including POST, PUT, DELETE.\n\n This allows\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"\n Return `True` if permission is granted, `False` otherwise.\n \"\"\"\n return request.method in permissions.SAFE_METHODS or request.user.is_superuser\n\n\nclass HasRegistrationAuth(permissions.BasePermission):\n \"\"\"\n Checks to see if registration is enabled\n \"\"\"\n def has_permission(self, request, view):\n \"\"\"\n If settings.REGISTRATION_MODE does not exist, such as during a test, return True\n Return `True` if permission is granted, `False` otherwise.\n \"\"\"\n try:\n if settings.REGISTRATION_MODE == 'disabled':\n raise exceptions.PermissionDenied('Registration is disabled')\n if settings.REGISTRATION_MODE == 'enabled':\n return True\n elif settings.REGISTRATION_MODE == 'admin_only':\n return request.user.is_superuser\n else:\n raise Exception(\"{} is not a valid registation mode\"\n .format(settings.REGISTRATION_MODE))\n except AttributeError:\n return True\n\n\nclass HasBuilderAuth(permissions.BasePermission):\n \"\"\"\n View permission to allow builder to perform actions\n with a special HTTP header\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"\n Return `True` if permission is granted, `False` otherwise.\n \"\"\"\n auth_header = request.environ.get('HTTP_X_DEIS_BUILDER_AUTH')\n if not auth_header:\n return False\n return auth_header == settings.BUILDER_KEY\n\n\nclass CanRegenerateToken(permissions.BasePermission):\n \"\"\"\n Checks if a user can regenerate a token\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"\n Return `True` if permission is granted, `False` otherwise.\n \"\"\"\n if 'username' in request.data or 'all' in request.data:\n return request.user.is_superuser\n else:\n return True\n", "path": "controller/api/permissions.py"}]}
1546
141
gh_patches_debug_37554
rasdani/github-patches
git_diff
litestar-org__litestar-1695
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- StaticFilesConfig and virtual directories I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems. https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `litestar/contrib/jwt/jwt_token.py` Content: ``` 1 from __future__ import annotations 2 3 from dataclasses import asdict, dataclass, field 4 from datetime import datetime, timezone 5 from typing import cast 6 7 from jose import JWSError, JWTError, jwt 8 9 from litestar.exceptions import ImproperlyConfiguredException, NotAuthorizedException 10 11 __all__ = ("Token",) 12 13 14 def _normalize_datetime(value: datetime) -> datetime: 15 """Convert the given value into UTC and strip microseconds. 16 17 Args: 18 value: A datetime instance 19 20 Returns: 21 A datetime instance 22 """ 23 if value.tzinfo is not None: 24 value.astimezone(timezone.utc) 25 26 return value.replace(microsecond=0) 27 28 29 @dataclass 30 class Token: 31 """JWT Token DTO.""" 32 33 exp: datetime 34 """Expiration - datetime for token expiration.""" 35 sub: str 36 """Subject - usually a unique identifier of the user or equivalent entity.""" 37 iat: datetime = field(default_factory=lambda: _normalize_datetime(datetime.now(timezone.utc))) 38 """Issued at - should always be current now.""" 39 iss: str | None = field(default=None) 40 """Issuer - optional unique identifier for the issuer.""" 41 aud: str | None = field(default=None) 42 """Audience - intended audience.""" 43 jti: str | None = field(default=None) 44 """JWT ID - a unique identifier of the JWT between different issuers.""" 45 46 def __post_init__(self) -> None: 47 if len(self.sub) < 1: 48 raise ImproperlyConfiguredException("sub must be a string with a length greater than 0") 49 50 if isinstance(self.exp, datetime) and ( 51 (exp := _normalize_datetime(self.exp)) 52 and exp.timestamp() >= _normalize_datetime(datetime.now(timezone.utc)).timestamp() 53 ): 54 self.exp = exp 55 else: 56 raise ImproperlyConfiguredException("exp value must be a datetime in the future") 57 58 if isinstance(self.iat, datetime) and ( 59 (iat := _normalize_datetime(self.iat)) 60 and iat.timestamp() <= _normalize_datetime(datetime.now(timezone.utc)).timestamp() 61 ): 62 self.iat = iat 63 else: 64 raise ImproperlyConfiguredException("iat must be a current or past time") 65 66 @staticmethod 67 def decode(encoded_token: str, secret: str | dict[str, str], algorithm: str) -> Token: 68 """Decode a passed in token string and returns a Token instance. 69 70 Args: 71 encoded_token: A base64 string containing an encoded JWT. 72 secret: The secret with which the JWT is encoded. It may optionally be an individual JWK or JWS set dict 73 algorithm: The algorithm used to encode the JWT. 74 75 Returns: 76 A decoded Token instance. 77 78 Raises: 79 NotAuthorizedException: If the token is invalid. 
80 """ 81 try: 82 payload = jwt.decode(token=encoded_token, key=secret, algorithms=[algorithm], options={"verify_aud": False}) 83 exp = datetime.fromtimestamp(payload.pop("exp"), tz=timezone.utc) 84 iat = datetime.fromtimestamp(payload.pop("iat"), tz=timezone.utc) 85 return Token(exp=exp, iat=iat, **payload) 86 except (KeyError, JWTError, ImproperlyConfiguredException) as e: 87 raise NotAuthorizedException("Invalid token") from e 88 89 def encode(self, secret: str, algorithm: str) -> str: 90 """Encode the token instance into a string. 91 92 Args: 93 secret: The secret with which the JWT is encoded. 94 algorithm: The algorithm used to encode the JWT. 95 96 Returns: 97 An encoded token string. 98 99 Raises: 100 ImproperlyConfiguredException: If encoding fails. 101 """ 102 try: 103 return cast( 104 "str", 105 jwt.encode( 106 claims={k: v for k, v in asdict(self).items() if v is not None}, key=secret, algorithm=algorithm 107 ), 108 ) 109 except (JWTError, JWSError) as e: 110 raise ImproperlyConfiguredException("Failed to encode token") from e 111 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/litestar/contrib/jwt/jwt_token.py b/litestar/contrib/jwt/jwt_token.py --- a/litestar/contrib/jwt/jwt_token.py +++ b/litestar/contrib/jwt/jwt_token.py @@ -1,13 +1,18 @@ from __future__ import annotations +import dataclasses from dataclasses import asdict, dataclass, field from datetime import datetime, timezone -from typing import cast +from typing import TYPE_CHECKING, Any, cast from jose import JWSError, JWTError, jwt from litestar.exceptions import ImproperlyConfiguredException, NotAuthorizedException +if TYPE_CHECKING: + from typing_extensions import Self + + __all__ = ("Token",) @@ -42,6 +47,8 @@ """Audience - intended audience.""" jti: str | None = field(default=None) """JWT ID - a unique identifier of the JWT between different issuers.""" + extras: dict[str, Any] = field(default_factory=dict) + """Extra fields that were found on the JWT token.""" def __post_init__(self) -> None: if len(self.sub) < 1: @@ -63,8 +70,8 @@ else: raise ImproperlyConfiguredException("iat must be a current or past time") - @staticmethod - def decode(encoded_token: str, secret: str | dict[str, str], algorithm: str) -> Token: + @classmethod + def decode(cls, encoded_token: str, secret: str | dict[str, str], algorithm: str) -> Self: """Decode a passed in token string and returns a Token instance. Args: @@ -82,7 +89,12 @@ payload = jwt.decode(token=encoded_token, key=secret, algorithms=[algorithm], options={"verify_aud": False}) exp = datetime.fromtimestamp(payload.pop("exp"), tz=timezone.utc) iat = datetime.fromtimestamp(payload.pop("iat"), tz=timezone.utc) - return Token(exp=exp, iat=iat, **payload) + field_names = {f.name for f in dataclasses.fields(Token)} + extra_fields = payload.keys() - field_names + extras = payload.pop("extras", {}) + for key in extra_fields: + extras[key] = payload.pop(key) + return cls(exp=exp, iat=iat, **payload, extras=extras) except (KeyError, JWTError, ImproperlyConfiguredException) as e: raise NotAuthorizedException("Invalid token") from e
{"golden_diff": "diff --git a/litestar/contrib/jwt/jwt_token.py b/litestar/contrib/jwt/jwt_token.py\n--- a/litestar/contrib/jwt/jwt_token.py\n+++ b/litestar/contrib/jwt/jwt_token.py\n@@ -1,13 +1,18 @@\n from __future__ import annotations\n \n+import dataclasses\n from dataclasses import asdict, dataclass, field\n from datetime import datetime, timezone\n-from typing import cast\n+from typing import TYPE_CHECKING, Any, cast\n \n from jose import JWSError, JWTError, jwt\n \n from litestar.exceptions import ImproperlyConfiguredException, NotAuthorizedException\n \n+if TYPE_CHECKING:\n+ from typing_extensions import Self\n+\n+\n __all__ = (\"Token\",)\n \n \n@@ -42,6 +47,8 @@\n \"\"\"Audience - intended audience.\"\"\"\n jti: str | None = field(default=None)\n \"\"\"JWT ID - a unique identifier of the JWT between different issuers.\"\"\"\n+ extras: dict[str, Any] = field(default_factory=dict)\n+ \"\"\"Extra fields that were found on the JWT token.\"\"\"\n \n def __post_init__(self) -> None:\n if len(self.sub) < 1:\n@@ -63,8 +70,8 @@\n else:\n raise ImproperlyConfiguredException(\"iat must be a current or past time\")\n \n- @staticmethod\n- def decode(encoded_token: str, secret: str | dict[str, str], algorithm: str) -> Token:\n+ @classmethod\n+ def decode(cls, encoded_token: str, secret: str | dict[str, str], algorithm: str) -> Self:\n \"\"\"Decode a passed in token string and returns a Token instance.\n \n Args:\n@@ -82,7 +89,12 @@\n payload = jwt.decode(token=encoded_token, key=secret, algorithms=[algorithm], options={\"verify_aud\": False})\n exp = datetime.fromtimestamp(payload.pop(\"exp\"), tz=timezone.utc)\n iat = datetime.fromtimestamp(payload.pop(\"iat\"), tz=timezone.utc)\n- return Token(exp=exp, iat=iat, **payload)\n+ field_names = {f.name for f in dataclasses.fields(Token)}\n+ extra_fields = payload.keys() - field_names\n+ extras = payload.pop(\"extras\", {})\n+ for key in extra_fields:\n+ extras[key] = payload.pop(key)\n+ return cls(exp=exp, iat=iat, **payload, extras=extras)\n except (KeyError, JWTError, ImproperlyConfiguredException) as e:\n raise NotAuthorizedException(\"Invalid token\") from e\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). 
I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom dataclasses import asdict, dataclass, field\nfrom datetime import datetime, timezone\nfrom typing import cast\n\nfrom jose import JWSError, JWTError, jwt\n\nfrom litestar.exceptions import ImproperlyConfiguredException, NotAuthorizedException\n\n__all__ = (\"Token\",)\n\n\ndef _normalize_datetime(value: datetime) -> datetime:\n \"\"\"Convert the given value into UTC and strip microseconds.\n\n Args:\n value: A datetime instance\n\n Returns:\n A datetime instance\n \"\"\"\n if value.tzinfo is not None:\n value.astimezone(timezone.utc)\n\n return value.replace(microsecond=0)\n\n\n@dataclass\nclass Token:\n \"\"\"JWT Token DTO.\"\"\"\n\n exp: datetime\n \"\"\"Expiration - datetime for token expiration.\"\"\"\n sub: str\n \"\"\"Subject - usually a unique identifier of the user or equivalent entity.\"\"\"\n iat: datetime = field(default_factory=lambda: _normalize_datetime(datetime.now(timezone.utc)))\n \"\"\"Issued at - should always be current now.\"\"\"\n iss: str | None = field(default=None)\n \"\"\"Issuer - optional unique identifier for the issuer.\"\"\"\n aud: str | None = field(default=None)\n \"\"\"Audience - intended audience.\"\"\"\n jti: str | None = field(default=None)\n \"\"\"JWT ID - a unique identifier of the JWT between different issuers.\"\"\"\n\n def __post_init__(self) -> None:\n if len(self.sub) < 1:\n raise ImproperlyConfiguredException(\"sub must be a string with a length greater than 0\")\n\n if isinstance(self.exp, datetime) and (\n (exp := _normalize_datetime(self.exp))\n and exp.timestamp() >= _normalize_datetime(datetime.now(timezone.utc)).timestamp()\n ):\n self.exp = exp\n else:\n raise ImproperlyConfiguredException(\"exp value must be a datetime in the future\")\n\n if isinstance(self.iat, datetime) and (\n (iat := _normalize_datetime(self.iat))\n and iat.timestamp() <= _normalize_datetime(datetime.now(timezone.utc)).timestamp()\n ):\n self.iat = iat\n else:\n raise ImproperlyConfiguredException(\"iat must be a current or past time\")\n\n @staticmethod\n def decode(encoded_token: str, secret: str | dict[str, str], algorithm: str) -> Token:\n \"\"\"Decode a passed in token string and returns a Token instance.\n\n Args:\n encoded_token: A base64 string containing an encoded JWT.\n secret: The secret with which the JWT is encoded. 
It may optionally be an individual JWK or JWS set dict\n algorithm: The algorithm used to encode the JWT.\n\n Returns:\n A decoded Token instance.\n\n Raises:\n NotAuthorizedException: If the token is invalid.\n \"\"\"\n try:\n payload = jwt.decode(token=encoded_token, key=secret, algorithms=[algorithm], options={\"verify_aud\": False})\n exp = datetime.fromtimestamp(payload.pop(\"exp\"), tz=timezone.utc)\n iat = datetime.fromtimestamp(payload.pop(\"iat\"), tz=timezone.utc)\n return Token(exp=exp, iat=iat, **payload)\n except (KeyError, JWTError, ImproperlyConfiguredException) as e:\n raise NotAuthorizedException(\"Invalid token\") from e\n\n def encode(self, secret: str, algorithm: str) -> str:\n \"\"\"Encode the token instance into a string.\n\n Args:\n secret: The secret with which the JWT is encoded.\n algorithm: The algorithm used to encode the JWT.\n\n Returns:\n An encoded token string.\n\n Raises:\n ImproperlyConfiguredException: If encoding fails.\n \"\"\"\n try:\n return cast(\n \"str\",\n jwt.encode(\n claims={k: v for k, v in asdict(self).items() if v is not None}, key=secret, algorithm=algorithm\n ),\n )\n except (JWTError, JWSError) as e:\n raise ImproperlyConfiguredException(\"Failed to encode token\") from e\n", "path": "litestar/contrib/jwt/jwt_token.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport dataclasses\nfrom dataclasses import asdict, dataclass, field\nfrom datetime import datetime, timezone\nfrom typing import TYPE_CHECKING, Any, cast\n\nfrom jose import JWSError, JWTError, jwt\n\nfrom litestar.exceptions import ImproperlyConfiguredException, NotAuthorizedException\n\nif TYPE_CHECKING:\n from typing_extensions import Self\n\n\n__all__ = (\"Token\",)\n\n\ndef _normalize_datetime(value: datetime) -> datetime:\n \"\"\"Convert the given value into UTC and strip microseconds.\n\n Args:\n value: A datetime instance\n\n Returns:\n A datetime instance\n \"\"\"\n if value.tzinfo is not None:\n value.astimezone(timezone.utc)\n\n return value.replace(microsecond=0)\n\n\n@dataclass\nclass Token:\n \"\"\"JWT Token DTO.\"\"\"\n\n exp: datetime\n \"\"\"Expiration - datetime for token expiration.\"\"\"\n sub: str\n \"\"\"Subject - usually a unique identifier of the user or equivalent entity.\"\"\"\n iat: datetime = field(default_factory=lambda: _normalize_datetime(datetime.now(timezone.utc)))\n \"\"\"Issued at - should always be current now.\"\"\"\n iss: str | None = field(default=None)\n \"\"\"Issuer - optional unique identifier for the issuer.\"\"\"\n aud: str | None = field(default=None)\n \"\"\"Audience - intended audience.\"\"\"\n jti: str | None = field(default=None)\n \"\"\"JWT ID - a unique identifier of the JWT between different issuers.\"\"\"\n extras: dict[str, Any] = field(default_factory=dict)\n \"\"\"Extra fields that were found on the JWT token.\"\"\"\n\n def __post_init__(self) -> None:\n if len(self.sub) < 1:\n raise ImproperlyConfiguredException(\"sub must be a string with a length greater than 0\")\n\n if isinstance(self.exp, datetime) and (\n (exp := _normalize_datetime(self.exp))\n and exp.timestamp() >= _normalize_datetime(datetime.now(timezone.utc)).timestamp()\n ):\n self.exp = exp\n else:\n raise ImproperlyConfiguredException(\"exp value must be a datetime in the future\")\n\n if isinstance(self.iat, datetime) and (\n (iat := _normalize_datetime(self.iat))\n and iat.timestamp() <= _normalize_datetime(datetime.now(timezone.utc)).timestamp()\n ):\n self.iat = iat\n else:\n raise ImproperlyConfiguredException(\"iat must be a 
current or past time\")\n\n @classmethod\n def decode(cls, encoded_token: str, secret: str | dict[str, str], algorithm: str) -> Self:\n \"\"\"Decode a passed in token string and returns a Token instance.\n\n Args:\n encoded_token: A base64 string containing an encoded JWT.\n secret: The secret with which the JWT is encoded. It may optionally be an individual JWK or JWS set dict\n algorithm: The algorithm used to encode the JWT.\n\n Returns:\n A decoded Token instance.\n\n Raises:\n NotAuthorizedException: If the token is invalid.\n \"\"\"\n try:\n payload = jwt.decode(token=encoded_token, key=secret, algorithms=[algorithm], options={\"verify_aud\": False})\n exp = datetime.fromtimestamp(payload.pop(\"exp\"), tz=timezone.utc)\n iat = datetime.fromtimestamp(payload.pop(\"iat\"), tz=timezone.utc)\n field_names = {f.name for f in dataclasses.fields(Token)}\n extra_fields = payload.keys() - field_names\n extras = payload.pop(\"extras\", {})\n for key in extra_fields:\n extras[key] = payload.pop(key)\n return cls(exp=exp, iat=iat, **payload, extras=extras)\n except (KeyError, JWTError, ImproperlyConfiguredException) as e:\n raise NotAuthorizedException(\"Invalid token\") from e\n\n def encode(self, secret: str, algorithm: str) -> str:\n \"\"\"Encode the token instance into a string.\n\n Args:\n secret: The secret with which the JWT is encoded.\n algorithm: The algorithm used to encode the JWT.\n\n Returns:\n An encoded token string.\n\n Raises:\n ImproperlyConfiguredException: If encoding fails.\n \"\"\"\n try:\n return cast(\n \"str\",\n jwt.encode(\n claims={k: v for k, v in asdict(self).items() if v is not None}, key=secret, algorithm=algorithm\n ),\n )\n except (JWTError, JWSError) as e:\n raise ImproperlyConfiguredException(\"Failed to encode token\") from e\n", "path": "litestar/contrib/jwt/jwt_token.py"}]}
1532
575
gh_patches_debug_26260
rasdani/github-patches
git_diff
genialis__resolwe-196
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Elasticserach returns paginated results when querying/mapping features using RESDK In resolwe-bio tools/goea.py `org_features = res.feature.filter(source=args.source_db, query=genes)` should return all genes, not just the first 10. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `resolwe/elastic/viewsets.py` Content: ``` 1 """.. Ignore pydocstyle D400. 2 3 ================ 4 Elastic Viewsets 5 ================ 6 7 .. autoclass:: resolwe.elastic.viewsets.ElasticSearchMixin 8 :members: 9 10 """ 11 from __future__ import absolute_import, division, print_function, unicode_literals 12 13 from elasticsearch_dsl.query import Q 14 15 from django.conf import settings 16 from django.contrib.auth import get_user_model 17 18 from rest_framework.response import Response 19 from rest_framework.viewsets import GenericViewSet 20 21 __all__ = ( 22 'ElasticSearchMixin', 23 'PaginationMixin', 24 'ElasticSearchBaseViewSet', 25 ) 26 27 28 class ElasticSearchMixin(object): 29 """Mixin to use Django REST Framework with ElasticSearch based querysets. 30 31 This mixin adds following methods: 32 * :func:`~ElasticSearchMixin.order_search` 33 * :func:`~ElasticSearchMixin.filter_search` 34 * :func:`~ElasticSearchMixin.filter_permissions` 35 36 """ 37 38 filtering_fields = [] 39 ordering_fields = [] 40 ordering = None 41 42 def get_query_param(self, key, default=None): 43 """Get query parameter uniformly for GET and POST requests.""" 44 value = self.request.query_params.get(key, None) 45 if value is None: 46 value = self.request.data.get(key, None) 47 if value is None: 48 value = default 49 return value 50 51 def order_search(self, search): 52 """Order given search by the ordering parameter given in request. 53 54 :param search: ElasticSearch query object 55 56 """ 57 ordering = self.get_query_param('ordering', self.ordering) 58 59 ordering_field = ordering.lstrip('-') 60 if ordering_field not in self.ordering_fields: 61 raise KeyError('Ordering by `{}` is not supported.'.format(ordering_field)) 62 63 return search.sort(ordering) 64 65 def filter_search(self, search): 66 """Filter given search by the filter parameter given in request. 67 68 :param search: ElasticSearch query object 69 70 """ 71 for field in self.filtering_fields: 72 value = self.get_query_param(field, None) 73 if value: 74 if isinstance(value, list): 75 filters = [Q('match', **{field: item}) for item in value] 76 search = search.query('bool', should=filters) 77 else: 78 search = search.query('wildcard', **{field: value}) 79 80 return search 81 82 def filter_permissions(self, search): 83 """Filter given query based on permissions of the user in the request. 
84 85 :param search: ElasticSearch query object 86 87 """ 88 user = self.request.user 89 if user.is_superuser: 90 return search 91 if user.is_anonymous(): 92 user_model = get_user_model() 93 user = user_model.objects.get(**{user_model.USERNAME_FIELD: settings.ANONYMOUS_USER_NAME}) 94 95 filters = [Q('match', users_with_permissions=user.pk)] 96 filters.extend([ 97 Q('match', groups_with_permissions=group.pk) for group in user.groups.all() 98 ]) 99 100 # `minimum_should_match` is set to 1 by default 101 return search.query('bool', should=filters) 102 103 104 class PaginationMixin(object): 105 """Mixin for making paginated response in case pagination parameters are provided.""" 106 107 def paginate_response(self, queryset): 108 """Optionally return paginated response. 109 110 If pagination parameters are provided in the request, then paginated response 111 is returned, otherwise response is not paginated. 112 113 """ 114 page = self.paginate_queryset(queryset) 115 if page is not None: 116 serializer = self.get_serializer(page, many=True) 117 return self.get_paginated_response(serializer.data) 118 119 serializer = self.get_serializer(queryset, many=True) 120 return Response(serializer.data) 121 122 123 class ElasticSearchBaseViewSet(PaginationMixin, ElasticSearchMixin, GenericViewSet): 124 """Base ViewSet for ElasticSearch based views. 125 126 This ViewSet creates search based on ``document_class`` parameter, specified 127 in subclass and: 128 129 * filter it by query parameters 130 * apply ordering 131 * filter permissions 132 * apply pagination 133 134 .. IMPORTANT:: 135 136 Both ``POST`` and ``GET`` requests are supported. 137 """ 138 139 document_class = None 140 141 def custom_filter(self, search): 142 """Perform custom search filtering. 143 144 This method is intended to be overriden in subclasses. 145 """ 146 return search 147 148 def list_with_post(self, request): 149 """Endpoint handler.""" 150 search = self.document_class().search() # pylint: disable=not-callable 151 152 search = self.custom_filter(search) 153 154 search = self.filter_search(search) 155 search = self.order_search(search) 156 search = self.filter_permissions(search) 157 158 return self.paginate_response(search) 159 160 def list(self, request): 161 """Endpoint handler.""" 162 return self.list_with_post(request) 163 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/resolwe/elastic/viewsets.py b/resolwe/elastic/viewsets.py --- a/resolwe/elastic/viewsets.py +++ b/resolwe/elastic/viewsets.py @@ -15,6 +15,7 @@ from django.conf import settings from django.contrib.auth import get_user_model +from rest_framework.exceptions import APIException from rest_framework.response import Response from rest_framework.viewsets import GenericViewSet @@ -24,6 +25,16 @@ 'ElasticSearchBaseViewSet', ) +ELASTICSEARCH_SIZE = 10000 # maximum number of results returned by ElasticSearch + + +class TooManyResults(APIException): + """Exception when elastic query returns more than ``ELASTICSEARCH_SIZE`` results.""" + + status_code = 400 + default_detail = 'Query returned too many results. Please, add more filters or use pagination.' + default_code = 'bad_request' + class ElasticSearchMixin(object): """Mixin to use Django REST Framework with ElasticSearch based querysets. @@ -155,6 +166,11 @@ search = self.order_search(search) search = self.filter_permissions(search) + if search.count() > ELASTICSEARCH_SIZE: + raise TooManyResults() + + search = search.extra(size=ELASTICSEARCH_SIZE) + return self.paginate_response(search) def list(self, request):
{"golden_diff": "diff --git a/resolwe/elastic/viewsets.py b/resolwe/elastic/viewsets.py\n--- a/resolwe/elastic/viewsets.py\n+++ b/resolwe/elastic/viewsets.py\n@@ -15,6 +15,7 @@\n from django.conf import settings\n from django.contrib.auth import get_user_model\n \n+from rest_framework.exceptions import APIException\n from rest_framework.response import Response\n from rest_framework.viewsets import GenericViewSet\n \n@@ -24,6 +25,16 @@\n 'ElasticSearchBaseViewSet',\n )\n \n+ELASTICSEARCH_SIZE = 10000 # maximum number of results returned by ElasticSearch\n+\n+\n+class TooManyResults(APIException):\n+ \"\"\"Exception when elastic query returns more than ``ELASTICSEARCH_SIZE`` results.\"\"\"\n+\n+ status_code = 400\n+ default_detail = 'Query returned too many results. Please, add more filters or use pagination.'\n+ default_code = 'bad_request'\n+\n \n class ElasticSearchMixin(object):\n \"\"\"Mixin to use Django REST Framework with ElasticSearch based querysets.\n@@ -155,6 +166,11 @@\n search = self.order_search(search)\n search = self.filter_permissions(search)\n \n+ if search.count() > ELASTICSEARCH_SIZE:\n+ raise TooManyResults()\n+\n+ search = search.extra(size=ELASTICSEARCH_SIZE)\n+\n return self.paginate_response(search)\n \n def list(self, request):\n", "issue": "Elasticserach returns paginated results when querying/mapping features using RESDK\nIn resolwe-bio tools/goea.py `org_features = res.feature.filter(source=args.source_db, query=genes)` should return all genes, not just the first 10.\n", "before_files": [{"content": "\"\"\".. Ignore pydocstyle D400.\n\n================\nElastic Viewsets\n================\n\n.. autoclass:: resolwe.elastic.viewsets.ElasticSearchMixin\n :members:\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom elasticsearch_dsl.query import Q\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\n\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\n\n__all__ = (\n 'ElasticSearchMixin',\n 'PaginationMixin',\n 'ElasticSearchBaseViewSet',\n)\n\n\nclass ElasticSearchMixin(object):\n \"\"\"Mixin to use Django REST Framework with ElasticSearch based querysets.\n\n This mixin adds following methods:\n * :func:`~ElasticSearchMixin.order_search`\n * :func:`~ElasticSearchMixin.filter_search`\n * :func:`~ElasticSearchMixin.filter_permissions`\n\n \"\"\"\n\n filtering_fields = []\n ordering_fields = []\n ordering = None\n\n def get_query_param(self, key, default=None):\n \"\"\"Get query parameter uniformly for GET and POST requests.\"\"\"\n value = self.request.query_params.get(key, None)\n if value is None:\n value = self.request.data.get(key, None)\n if value is None:\n value = default\n return value\n\n def order_search(self, search):\n \"\"\"Order given search by the ordering parameter given in request.\n\n :param search: ElasticSearch query object\n\n \"\"\"\n ordering = self.get_query_param('ordering', self.ordering)\n\n ordering_field = ordering.lstrip('-')\n if ordering_field not in self.ordering_fields:\n raise KeyError('Ordering by `{}` is not supported.'.format(ordering_field))\n\n return search.sort(ordering)\n\n def filter_search(self, search):\n \"\"\"Filter given search by the filter parameter given in request.\n\n :param search: ElasticSearch query object\n\n \"\"\"\n for field in self.filtering_fields:\n value = self.get_query_param(field, None)\n if value:\n if isinstance(value, list):\n filters = [Q('match', **{field: item}) for 
item in value]\n search = search.query('bool', should=filters)\n else:\n search = search.query('wildcard', **{field: value})\n\n return search\n\n def filter_permissions(self, search):\n \"\"\"Filter given query based on permissions of the user in the request.\n\n :param search: ElasticSearch query object\n\n \"\"\"\n user = self.request.user\n if user.is_superuser:\n return search\n if user.is_anonymous():\n user_model = get_user_model()\n user = user_model.objects.get(**{user_model.USERNAME_FIELD: settings.ANONYMOUS_USER_NAME})\n\n filters = [Q('match', users_with_permissions=user.pk)]\n filters.extend([\n Q('match', groups_with_permissions=group.pk) for group in user.groups.all()\n ])\n\n # `minimum_should_match` is set to 1 by default\n return search.query('bool', should=filters)\n\n\nclass PaginationMixin(object):\n \"\"\"Mixin for making paginated response in case pagination parameters are provided.\"\"\"\n\n def paginate_response(self, queryset):\n \"\"\"Optionally return paginated response.\n\n If pagination parameters are provided in the request, then paginated response\n is returned, otherwise response is not paginated.\n\n \"\"\"\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n\n\nclass ElasticSearchBaseViewSet(PaginationMixin, ElasticSearchMixin, GenericViewSet):\n \"\"\"Base ViewSet for ElasticSearch based views.\n\n This ViewSet creates search based on ``document_class`` parameter, specified\n in subclass and:\n\n * filter it by query parameters\n * apply ordering\n * filter permissions\n * apply pagination\n\n .. IMPORTANT::\n\n Both ``POST`` and ``GET`` requests are supported.\n \"\"\"\n\n document_class = None\n\n def custom_filter(self, search):\n \"\"\"Perform custom search filtering.\n\n This method is intended to be overriden in subclasses.\n \"\"\"\n return search\n\n def list_with_post(self, request):\n \"\"\"Endpoint handler.\"\"\"\n search = self.document_class().search() # pylint: disable=not-callable\n\n search = self.custom_filter(search)\n\n search = self.filter_search(search)\n search = self.order_search(search)\n search = self.filter_permissions(search)\n\n return self.paginate_response(search)\n\n def list(self, request):\n \"\"\"Endpoint handler.\"\"\"\n return self.list_with_post(request)\n", "path": "resolwe/elastic/viewsets.py"}], "after_files": [{"content": "\"\"\".. Ignore pydocstyle D400.\n\n================\nElastic Viewsets\n================\n\n.. autoclass:: resolwe.elastic.viewsets.ElasticSearchMixin\n :members:\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom elasticsearch_dsl.query import Q\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\n\nfrom rest_framework.exceptions import APIException\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\n\n__all__ = (\n 'ElasticSearchMixin',\n 'PaginationMixin',\n 'ElasticSearchBaseViewSet',\n)\n\nELASTICSEARCH_SIZE = 10000 # maximum number of results returned by ElasticSearch\n\n\nclass TooManyResults(APIException):\n \"\"\"Exception when elastic query returns more than ``ELASTICSEARCH_SIZE`` results.\"\"\"\n\n status_code = 400\n default_detail = 'Query returned too many results. 
Please, add more filters or use pagination.'\n default_code = 'bad_request'\n\n\nclass ElasticSearchMixin(object):\n \"\"\"Mixin to use Django REST Framework with ElasticSearch based querysets.\n\n This mixin adds following methods:\n * :func:`~ElasticSearchMixin.order_search`\n * :func:`~ElasticSearchMixin.filter_search`\n * :func:`~ElasticSearchMixin.filter_permissions`\n\n \"\"\"\n\n filtering_fields = []\n ordering_fields = []\n ordering = None\n\n def get_query_param(self, key, default=None):\n \"\"\"Get query parameter uniformly for GET and POST requests.\"\"\"\n value = self.request.query_params.get(key, None)\n if value is None:\n value = self.request.data.get(key, None)\n if value is None:\n value = default\n return value\n\n def order_search(self, search):\n \"\"\"Order given search by the ordering parameter given in request.\n\n :param search: ElasticSearch query object\n\n \"\"\"\n ordering = self.get_query_param('ordering', self.ordering)\n\n ordering_field = ordering.lstrip('-')\n if ordering_field not in self.ordering_fields:\n raise KeyError('Ordering by `{}` is not supported.'.format(ordering_field))\n\n return search.sort(ordering)\n\n def filter_search(self, search):\n \"\"\"Filter given search by the filter parameter given in request.\n\n :param search: ElasticSearch query object\n\n \"\"\"\n for field in self.filtering_fields:\n value = self.get_query_param(field, None)\n if value:\n if isinstance(value, list):\n filters = [Q('match', **{field: item}) for item in value]\n search = search.query('bool', should=filters)\n else:\n search = search.query('wildcard', **{field: value})\n\n return search\n\n def filter_permissions(self, search):\n \"\"\"Filter given query based on permissions of the user in the request.\n\n :param search: ElasticSearch query object\n\n \"\"\"\n user = self.request.user\n if user.is_superuser:\n return search\n if user.is_anonymous():\n user_model = get_user_model()\n user = user_model.objects.get(**{user_model.USERNAME_FIELD: settings.ANONYMOUS_USER_NAME})\n\n filters = [Q('match', users_with_permissions=user.pk)]\n filters.extend([\n Q('match', groups_with_permissions=group.pk) for group in user.groups.all()\n ])\n\n # `minimum_should_match` is set to 1 by default\n return search.query('bool', should=filters)\n\n\nclass PaginationMixin(object):\n \"\"\"Mixin for making paginated response in case pagination parameters are provided.\"\"\"\n\n def paginate_response(self, queryset):\n \"\"\"Optionally return paginated response.\n\n If pagination parameters are provided in the request, then paginated response\n is returned, otherwise response is not paginated.\n\n \"\"\"\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n\n\nclass ElasticSearchBaseViewSet(PaginationMixin, ElasticSearchMixin, GenericViewSet):\n \"\"\"Base ViewSet for ElasticSearch based views.\n\n This ViewSet creates search based on ``document_class`` parameter, specified\n in subclass and:\n\n * filter it by query parameters\n * apply ordering\n * filter permissions\n * apply pagination\n\n .. 
IMPORTANT::\n\n Both ``POST`` and ``GET`` requests are supported.\n \"\"\"\n\n document_class = None\n\n def custom_filter(self, search):\n \"\"\"Perform custom search filtering.\n\n This method is intended to be overriden in subclasses.\n \"\"\"\n return search\n\n def list_with_post(self, request):\n \"\"\"Endpoint handler.\"\"\"\n search = self.document_class().search() # pylint: disable=not-callable\n\n search = self.custom_filter(search)\n\n search = self.filter_search(search)\n search = self.order_search(search)\n search = self.filter_permissions(search)\n\n if search.count() > ELASTICSEARCH_SIZE:\n raise TooManyResults()\n\n search = search.extra(size=ELASTICSEARCH_SIZE)\n\n return self.paginate_response(search)\n\n def list(self, request):\n \"\"\"Endpoint handler.\"\"\"\n return self.list_with_post(request)\n", "path": "resolwe/elastic/viewsets.py"}]}
1,726
320
gh_patches_debug_28671
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-3348
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spider avis is broken During the global build at 2021-06-02-14-42-40, spider **avis** failed with **4383 features** and **36 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/avis.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/avis.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/avis.geojson)) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/avis.py` Content: ``` 1 import scrapy 2 import re 3 4 from locations.items import GeojsonPointItem 5 from locations.hours import OpeningHours 6 7 8 DAY_MAPPING = { 9 'Mon': 'Mo', 10 'Tue': 'Tu', 11 'Wed': 'We', 12 'Thu': 'Th', 13 'Fri': 'Fr', 14 'Sat': 'Sa', 15 'Sun': 'Su' 16 } 17 DAYS = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] 18 19 20 class AvisSpider(scrapy.Spider): 21 22 name = "avis" 23 item_attributes = { 'brand': "Avis", 'brand_wikidata': "Q791136" } 24 download_delay = 0.5 25 allowed_domains = [ 26 "avis.com", 27 ] 28 start_urls = ( 29 'https://www.avis.com/en/locations/avisworldwide', 30 ) 31 32 def parse_hours(self, hours): 33 "Sun - Sat 7:00 AM - 10:00 PM" 34 opening_hours = OpeningHours() 35 hours = [h.strip() for h in hours.split(';')] 36 37 for hour in hours: 38 if hour == "Sun - Sat open 24 hrs": 39 return "24/7" 40 range_match = re.search(r'([A-Za-z]{3})\s-\s([A-Za-z]{3})\s([\d:\sAMP]+)\s-\s([\d:\sAMP]+)', hour) 41 if range_match: 42 start_day, end_day, start_time, end_time = range_match.groups() 43 else: 44 single_match = re.search(r'([A-Za-z]{3})\s([\d:\sAMP]+)\s-\s([\d:\sAMP]+)', hour) 45 if not single_match: 46 continue 47 start_day, start_time, end_time = single_match.groups() 48 end_day = start_day 49 50 for day in DAYS[DAYS.index(start_day):DAYS.index(end_day)+1]: 51 opening_hours.add_range(day=DAY_MAPPING[day], 52 open_time=start_time.strip(), 53 close_time=end_time.strip(), 54 time_format='%I:%M %p') 55 return opening_hours.as_opening_hours() 56 57 def parse_store(self, response): 58 if response.url == 'https://www.avis.com/en/error/500': 59 # some closed locations get redirected to this error page 60 return 61 62 def clean(val): 63 if val: 64 return val.strip(', ') 65 return val 66 67 ref = response.url.split('/')[-1] 68 69 properties = { 70 'name': clean(response.xpath('//h2/span[@itemprop="name"]/text()').extract_first()), 71 'addr_full': clean(response.xpath('normalize-space(//span[@itemprop="streetAddress"]/text())').extract_first()), 72 'phone': response.xpath('normalize-space(//span[@itemprop="telephone"]/text())').extract_first(), 73 'city': clean(response.xpath('normalize-space(//span[@itemprop="addressLocality"]/text())').extract_first()), 74 'state': clean(response.xpath('normalize-space(//span[@itemprop="addressRegion"]/text())').extract_first()), 75 'postcode': clean(response.xpath('normalize-space(//span[@itemprop="postalCode"]/text())').extract_first()), 76 'country': clean(response.xpath('normalize-space(//span[@itemprop="addressCountry"]/text())').extract_first()), 77 'ref': ref, 78 'website': response.url, 79 'lat': float(response.xpath('//meta[@itemprop="latitude"]/@content').extract_first()), 80 'lon': float(response.xpath('//meta[@itemprop="longitude"]/@content').extract_first()), 81 } 82 hours = 
response.xpath('//meta[@itemprop="openingHours"]/@content').extract_first() 83 if hours: 84 properties['opening_hours'] = self.parse_hours(hours) 85 yield GeojsonPointItem(**properties) 86 87 def parse_state(self, response): 88 urls = response.xpath('//ul[contains(@class, "location-list-ul")]//li/a/@href').extract() 89 90 if not urls: 91 urls = set(response.xpath('//ul[contains(@class, "LocContainer")]//a/@href').extract()) 92 urls = [u for u in urls if 'javascript:void' not in u] 93 94 location_list = re.compile("^/en/locations/(?:us|ca|au)/[a-z]{2}/[^/]+$") 95 us_single_location = re.compile(r'/en/locations/(?:us|ca|au)/[a-z]{2}/[^/]+/[^/]+$') 96 single_location = re.compile(r'/en/locations/(?!us|ca|au)[a-z]{2}/[^/]+/[^/]+$') 97 98 for url in urls: 99 if single_location.match(url) or us_single_location.match(url): 100 yield scrapy.Request(response.urljoin(url), callback=self.parse_store) 101 elif location_list.match(url): 102 # skip these, we get them already 103 continue 104 elif 'xx' in url: 105 continue 106 107 def parse_country(self,response): 108 urls = response.xpath('//div[contains(@class,"country-wrapper")]//li/a/@href').extract() 109 110 for url in urls: 111 yield scrapy.Request(response.urljoin(url), callback=self.parse_state) 112 113 def parse(self, response): 114 urls = response.xpath('//div[@class="wl-location-state"]//li/a/@href').extract() 115 116 for url in urls: 117 yield scrapy.Request(response.urljoin(url), callback=self.parse_country) 118 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/avis.py b/locations/spiders/avis.py --- a/locations/spiders/avis.py +++ b/locations/spiders/avis.py @@ -66,6 +66,15 @@ ref = response.url.split('/')[-1] + latitude = None + longitude = None + + if response.xpath('//meta[@itemprop="latitude"]/@content').extract_first() is not None: + latitude = float(response.xpath('//meta[@itemprop="latitude"]/@content').extract_first()) + + if response.xpath('//meta[@itemprop="longitude"]/@content').extract_first() is not None: + longitude = float(response.xpath('//meta[@itemprop="longitude"]/@content').extract_first()) + properties = { 'name': clean(response.xpath('//h2/span[@itemprop="name"]/text()').extract_first()), 'addr_full': clean(response.xpath('normalize-space(//span[@itemprop="streetAddress"]/text())').extract_first()), @@ -76,8 +85,8 @@ 'country': clean(response.xpath('normalize-space(//span[@itemprop="addressCountry"]/text())').extract_first()), 'ref': ref, 'website': response.url, - 'lat': float(response.xpath('//meta[@itemprop="latitude"]/@content').extract_first()), - 'lon': float(response.xpath('//meta[@itemprop="longitude"]/@content').extract_first()), + 'lat': latitude, + 'lon': longitude, } hours = response.xpath('//meta[@itemprop="openingHours"]/@content').extract_first() if hours:
{"golden_diff": "diff --git a/locations/spiders/avis.py b/locations/spiders/avis.py\n--- a/locations/spiders/avis.py\n+++ b/locations/spiders/avis.py\n@@ -66,6 +66,15 @@\n \n ref = response.url.split('/')[-1]\n \n+ latitude = None\n+ longitude = None\n+\n+ if response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first() is not None:\n+ latitude = float(response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first())\n+\n+ if response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first() is not None:\n+ longitude = float(response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first())\n+\n properties = {\n 'name': clean(response.xpath('//h2/span[@itemprop=\"name\"]/text()').extract_first()),\n 'addr_full': clean(response.xpath('normalize-space(//span[@itemprop=\"streetAddress\"]/text())').extract_first()),\n@@ -76,8 +85,8 @@\n 'country': clean(response.xpath('normalize-space(//span[@itemprop=\"addressCountry\"]/text())').extract_first()),\n 'ref': ref,\n 'website': response.url,\n- 'lat': float(response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first()),\n- 'lon': float(response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first()),\n+ 'lat': latitude,\n+ 'lon': longitude,\n }\n hours = response.xpath('//meta[@itemprop=\"openingHours\"]/@content').extract_first()\n if hours:\n", "issue": "Spider avis is broken\nDuring the global build at 2021-06-02-14-42-40, spider **avis** failed with **4383 features** and **36 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/avis.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/avis.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/avis.geojson))\n", "before_files": [{"content": "import scrapy\nimport re\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nDAY_MAPPING = {\n 'Mon': 'Mo',\n 'Tue': 'Tu',\n 'Wed': 'We',\n 'Thu': 'Th',\n 'Fri': 'Fr',\n 'Sat': 'Sa',\n 'Sun': 'Su'\n}\nDAYS = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\n\n\nclass AvisSpider(scrapy.Spider):\n\n name = \"avis\"\n item_attributes = { 'brand': \"Avis\", 'brand_wikidata': \"Q791136\" }\n download_delay = 0.5\n allowed_domains = [\n \"avis.com\",\n ]\n start_urls = (\n 'https://www.avis.com/en/locations/avisworldwide',\n )\n\n def parse_hours(self, hours):\n \"Sun - Sat 7:00 AM - 10:00 PM\"\n opening_hours = OpeningHours()\n hours = [h.strip() for h in hours.split(';')]\n\n for hour in hours:\n if hour == \"Sun - Sat open 24 hrs\":\n return \"24/7\"\n range_match = re.search(r'([A-Za-z]{3})\\s-\\s([A-Za-z]{3})\\s([\\d:\\sAMP]+)\\s-\\s([\\d:\\sAMP]+)', hour)\n if range_match:\n start_day, end_day, start_time, end_time = range_match.groups()\n else:\n single_match = re.search(r'([A-Za-z]{3})\\s([\\d:\\sAMP]+)\\s-\\s([\\d:\\sAMP]+)', hour)\n if not single_match:\n continue\n start_day, start_time, end_time = single_match.groups()\n end_day = start_day\n\n for day in DAYS[DAYS.index(start_day):DAYS.index(end_day)+1]:\n opening_hours.add_range(day=DAY_MAPPING[day],\n open_time=start_time.strip(),\n close_time=end_time.strip(),\n time_format='%I:%M %p')\n return opening_hours.as_opening_hours()\n\n def parse_store(self, response):\n if response.url == 'https://www.avis.com/en/error/500':\n # some closed locations get redirected to this error page\n return\n\n def clean(val):\n if val:\n return val.strip(', ')\n return 
val\n\n ref = response.url.split('/')[-1]\n\n properties = {\n 'name': clean(response.xpath('//h2/span[@itemprop=\"name\"]/text()').extract_first()),\n 'addr_full': clean(response.xpath('normalize-space(//span[@itemprop=\"streetAddress\"]/text())').extract_first()),\n 'phone': response.xpath('normalize-space(//span[@itemprop=\"telephone\"]/text())').extract_first(),\n 'city': clean(response.xpath('normalize-space(//span[@itemprop=\"addressLocality\"]/text())').extract_first()),\n 'state': clean(response.xpath('normalize-space(//span[@itemprop=\"addressRegion\"]/text())').extract_first()),\n 'postcode': clean(response.xpath('normalize-space(//span[@itemprop=\"postalCode\"]/text())').extract_first()),\n 'country': clean(response.xpath('normalize-space(//span[@itemprop=\"addressCountry\"]/text())').extract_first()),\n 'ref': ref,\n 'website': response.url,\n 'lat': float(response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first()),\n 'lon': float(response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first()),\n }\n hours = response.xpath('//meta[@itemprop=\"openingHours\"]/@content').extract_first()\n if hours:\n properties['opening_hours'] = self.parse_hours(hours)\n yield GeojsonPointItem(**properties)\n\n def parse_state(self, response):\n urls = response.xpath('//ul[contains(@class, \"location-list-ul\")]//li/a/@href').extract()\n\n if not urls:\n urls = set(response.xpath('//ul[contains(@class, \"LocContainer\")]//a/@href').extract())\n urls = [u for u in urls if 'javascript:void' not in u]\n\n location_list = re.compile(\"^/en/locations/(?:us|ca|au)/[a-z]{2}/[^/]+$\")\n us_single_location = re.compile(r'/en/locations/(?:us|ca|au)/[a-z]{2}/[^/]+/[^/]+$')\n single_location = re.compile(r'/en/locations/(?!us|ca|au)[a-z]{2}/[^/]+/[^/]+$')\n\n for url in urls:\n if single_location.match(url) or us_single_location.match(url):\n yield scrapy.Request(response.urljoin(url), callback=self.parse_store)\n elif location_list.match(url):\n # skip these, we get them already\n continue\n elif 'xx' in url:\n continue\n\n def parse_country(self,response):\n urls = response.xpath('//div[contains(@class,\"country-wrapper\")]//li/a/@href').extract()\n\n for url in urls:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_state)\n\n def parse(self, response):\n urls = response.xpath('//div[@class=\"wl-location-state\"]//li/a/@href').extract()\n\n for url in urls:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_country)\n", "path": "locations/spiders/avis.py"}], "after_files": [{"content": "import scrapy\nimport re\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nDAY_MAPPING = {\n 'Mon': 'Mo',\n 'Tue': 'Tu',\n 'Wed': 'We',\n 'Thu': 'Th',\n 'Fri': 'Fr',\n 'Sat': 'Sa',\n 'Sun': 'Su'\n}\nDAYS = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\n\n\nclass AvisSpider(scrapy.Spider):\n\n name = \"avis\"\n item_attributes = { 'brand': \"Avis\", 'brand_wikidata': \"Q791136\" }\n download_delay = 0.5\n allowed_domains = [\n \"avis.com\",\n ]\n start_urls = (\n 'https://www.avis.com/en/locations/avisworldwide',\n )\n\n def parse_hours(self, hours):\n \"Sun - Sat 7:00 AM - 10:00 PM\"\n opening_hours = OpeningHours()\n hours = [h.strip() for h in hours.split(';')]\n\n for hour in hours:\n if hour == \"Sun - Sat open 24 hrs\":\n return \"24/7\"\n range_match = re.search(r'([A-Za-z]{3})\\s-\\s([A-Za-z]{3})\\s([\\d:\\sAMP]+)\\s-\\s([\\d:\\sAMP]+)', hour)\n if range_match:\n start_day, end_day, start_time, end_time = 
range_match.groups()\n else:\n single_match = re.search(r'([A-Za-z]{3})\\s([\\d:\\sAMP]+)\\s-\\s([\\d:\\sAMP]+)', hour)\n if not single_match:\n continue\n start_day, start_time, end_time = single_match.groups()\n end_day = start_day\n\n for day in DAYS[DAYS.index(start_day):DAYS.index(end_day)+1]:\n opening_hours.add_range(day=DAY_MAPPING[day],\n open_time=start_time.strip(),\n close_time=end_time.strip(),\n time_format='%I:%M %p')\n return opening_hours.as_opening_hours()\n\n def parse_store(self, response):\n if response.url == 'https://www.avis.com/en/error/500':\n # some closed locations get redirected to this error page\n return\n\n def clean(val):\n if val:\n return val.strip(', ')\n return val\n\n ref = response.url.split('/')[-1]\n\n latitude = None\n longitude = None\n\n if response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first() is not None:\n latitude = float(response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first())\n\n if response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first() is not None:\n longitude = float(response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first())\n\n properties = {\n 'name': clean(response.xpath('//h2/span[@itemprop=\"name\"]/text()').extract_first()),\n 'addr_full': clean(response.xpath('normalize-space(//span[@itemprop=\"streetAddress\"]/text())').extract_first()),\n 'phone': response.xpath('normalize-space(//span[@itemprop=\"telephone\"]/text())').extract_first(),\n 'city': clean(response.xpath('normalize-space(//span[@itemprop=\"addressLocality\"]/text())').extract_first()),\n 'state': clean(response.xpath('normalize-space(//span[@itemprop=\"addressRegion\"]/text())').extract_first()),\n 'postcode': clean(response.xpath('normalize-space(//span[@itemprop=\"postalCode\"]/text())').extract_first()),\n 'country': clean(response.xpath('normalize-space(//span[@itemprop=\"addressCountry\"]/text())').extract_first()),\n 'ref': ref,\n 'website': response.url,\n 'lat': latitude,\n 'lon': longitude,\n }\n hours = response.xpath('//meta[@itemprop=\"openingHours\"]/@content').extract_first()\n if hours:\n properties['opening_hours'] = self.parse_hours(hours)\n yield GeojsonPointItem(**properties)\n\n def parse_state(self, response):\n urls = response.xpath('//ul[contains(@class, \"location-list-ul\")]//li/a/@href').extract()\n\n if not urls:\n urls = set(response.xpath('//ul[contains(@class, \"LocContainer\")]//a/@href').extract())\n urls = [u for u in urls if 'javascript:void' not in u]\n\n location_list = re.compile(\"^/en/locations/(?:us|ca|au)/[a-z]{2}/[^/]+$\")\n us_single_location = re.compile(r'/en/locations/(?:us|ca|au)/[a-z]{2}/[^/]+/[^/]+$')\n single_location = re.compile(r'/en/locations/(?!us|ca|au)[a-z]{2}/[^/]+/[^/]+$')\n\n for url in urls:\n if single_location.match(url) or us_single_location.match(url):\n yield scrapy.Request(response.urljoin(url), callback=self.parse_store)\n elif location_list.match(url):\n # skip these, we get them already\n continue\n elif 'xx' in url:\n continue\n\n def parse_country(self,response):\n urls = response.xpath('//div[contains(@class,\"country-wrapper\")]//li/a/@href').extract()\n\n for url in urls:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_state)\n\n def parse(self, response):\n urls = response.xpath('//div[@class=\"wl-location-state\"]//li/a/@href').extract()\n\n for url in urls:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_country)\n", "path": "locations/spiders/avis.py"}]}
1,867
360
gh_patches_debug_13499
rasdani/github-patches
git_diff
lutris__lutris-488
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Specify a User Agent for HTTP requests Right now it's python-urllib/someversion, and Cloudflare sites (tested on medium protection site) blocks it and returns 403 status code. Testing the same url with curl works without it blocking, so I'm guessing Cloudflare checks the request UA. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lutris/util/http.py` Content: ``` 1 import json 2 import socket 3 import urllib.request 4 import urllib.error 5 import urllib.parse 6 from ssl import CertificateError 7 8 from lutris.settings import SITE_URL 9 from lutris.util.log import logger 10 11 12 class Request(object): 13 def __init__(self, url, timeout=5, stop_request=None, 14 thread_queue=None, headers={}): 15 16 if not url: 17 raise ValueError('An URL is required!') 18 19 if url.startswith('//'): 20 url = 'https:' + url 21 22 if url.startswith('/'): 23 url = SITE_URL + url 24 25 self.url = url 26 self.content = '' 27 self.timeout = timeout 28 self.stop_request = stop_request 29 self.thread_queue = thread_queue 30 self.buffer_size = 32 * 1024 # Bytes 31 self.downloaded_size = 0 32 self.headers = headers 33 34 def get(self, data=None): 35 req = urllib.request.Request(url=self.url, data=data, headers=self.headers) 36 try: 37 request = urllib.request.urlopen(req, timeout=self.timeout) 38 except (urllib.error.HTTPError, CertificateError) as e: 39 logger.error("Unavailable url (%s): %s", self.url, e) 40 except (socket.timeout, urllib.error.URLError) as e: 41 logger.error("Unable to connect to server (%s): %s", self.url, e) 42 else: 43 try: 44 total_size = request.info().get('Content-Length').strip() 45 total_size = int(total_size) 46 except AttributeError: 47 total_size = 0 48 49 chunks = [] 50 while 1: 51 if self.stop_request and self.stop_request.is_set(): 52 self.content = '' 53 return self 54 try: 55 chunk = request.read(self.buffer_size) 56 except socket.timeout as e: 57 logger.error("Request timed out") 58 self.content = '' 59 return self 60 self.downloaded_size += len(chunk) 61 if self.thread_queue: 62 self.thread_queue.put( 63 (chunk, self.downloaded_size, total_size) 64 ) 65 else: 66 chunks.append(chunk) 67 if not chunk: 68 break 69 request.close() 70 self.content = b''.join(chunks) 71 return self 72 73 def post(self, data): 74 raise NotImplementedError 75 76 def write_to_file(self, path): 77 content = self.content 78 if content: 79 with open(path, 'wb') as dest_file: 80 dest_file.write(content) 81 82 @property 83 def json(self): 84 if self.content: 85 return json.loads(self.text) 86 87 @property 88 def text(self): 89 if self.content: 90 return self.content.decode() 91 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lutris/util/http.py b/lutris/util/http.py --- a/lutris/util/http.py +++ b/lutris/util/http.py @@ -5,6 +5,8 @@ import urllib.parse from ssl import CertificateError +from lutris.settings import PROJECT +from lutris.settings import VERSION from lutris.settings import SITE_URL from lutris.util.log import logger @@ -29,6 +31,8 @@ self.thread_queue = thread_queue self.buffer_size = 32 * 1024 # Bytes self.downloaded_size = 0 + if not headers.get('User-Agent'): + headers['User-Agent'] = PROJECT + '/' + VERSION self.headers = headers def get(self, data=None):
{"golden_diff": "diff --git a/lutris/util/http.py b/lutris/util/http.py\n--- a/lutris/util/http.py\n+++ b/lutris/util/http.py\n@@ -5,6 +5,8 @@\n import urllib.parse\n from ssl import CertificateError\n \n+from lutris.settings import PROJECT\n+from lutris.settings import VERSION\n from lutris.settings import SITE_URL\n from lutris.util.log import logger\n \n@@ -29,6 +31,8 @@\n self.thread_queue = thread_queue\n self.buffer_size = 32 * 1024 # Bytes\n self.downloaded_size = 0\n+ if not headers.get('User-Agent'):\n+ headers['User-Agent'] = PROJECT + '/' + VERSION\n self.headers = headers\n \n def get(self, data=None):\n", "issue": "Specify a User Agent for HTTP requests\nRight now it's python-urllib/someversion, and Cloudflare sites (tested on medium protection site) blocks it and returns 403 status code.\r\nTesting the same url with curl works without it blocking, so I'm guessing Cloudflare checks the request UA.\n", "before_files": [{"content": "import json\nimport socket\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nfrom ssl import CertificateError\n\nfrom lutris.settings import SITE_URL\nfrom lutris.util.log import logger\n\n\nclass Request(object):\n def __init__(self, url, timeout=5, stop_request=None,\n thread_queue=None, headers={}):\n\n if not url:\n raise ValueError('An URL is required!')\n\n if url.startswith('//'):\n url = 'https:' + url\n\n if url.startswith('/'):\n url = SITE_URL + url\n\n self.url = url\n self.content = ''\n self.timeout = timeout\n self.stop_request = stop_request\n self.thread_queue = thread_queue\n self.buffer_size = 32 * 1024 # Bytes\n self.downloaded_size = 0\n self.headers = headers\n\n def get(self, data=None):\n req = urllib.request.Request(url=self.url, data=data, headers=self.headers)\n try:\n request = urllib.request.urlopen(req, timeout=self.timeout)\n except (urllib.error.HTTPError, CertificateError) as e:\n logger.error(\"Unavailable url (%s): %s\", self.url, e)\n except (socket.timeout, urllib.error.URLError) as e:\n logger.error(\"Unable to connect to server (%s): %s\", self.url, e)\n else:\n try:\n total_size = request.info().get('Content-Length').strip()\n total_size = int(total_size)\n except AttributeError:\n total_size = 0\n\n chunks = []\n while 1:\n if self.stop_request and self.stop_request.is_set():\n self.content = ''\n return self\n try:\n chunk = request.read(self.buffer_size)\n except socket.timeout as e:\n logger.error(\"Request timed out\")\n self.content = ''\n return self\n self.downloaded_size += len(chunk)\n if self.thread_queue:\n self.thread_queue.put(\n (chunk, self.downloaded_size, total_size)\n )\n else:\n chunks.append(chunk)\n if not chunk:\n break\n request.close()\n self.content = b''.join(chunks)\n return self\n\n def post(self, data):\n raise NotImplementedError\n\n def write_to_file(self, path):\n content = self.content\n if content:\n with open(path, 'wb') as dest_file:\n dest_file.write(content)\n\n @property\n def json(self):\n if self.content:\n return json.loads(self.text)\n\n @property\n def text(self):\n if self.content:\n return self.content.decode()\n", "path": "lutris/util/http.py"}], "after_files": [{"content": "import json\nimport socket\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nfrom ssl import CertificateError\n\nfrom lutris.settings import PROJECT\nfrom lutris.settings import VERSION\nfrom lutris.settings import SITE_URL\nfrom lutris.util.log import logger\n\n\nclass Request(object):\n def __init__(self, url, timeout=5, stop_request=None,\n thread_queue=None, 
headers={}):\n\n if not url:\n raise ValueError('An URL is required!')\n\n if url.startswith('//'):\n url = 'https:' + url\n\n if url.startswith('/'):\n url = SITE_URL + url\n\n self.url = url\n self.content = ''\n self.timeout = timeout\n self.stop_request = stop_request\n self.thread_queue = thread_queue\n self.buffer_size = 32 * 1024 # Bytes\n self.downloaded_size = 0\n if not headers.get('User-Agent'):\n headers['User-Agent'] = PROJECT + '/' + VERSION\n self.headers = headers\n\n def get(self, data=None):\n req = urllib.request.Request(url=self.url, data=data, headers=self.headers)\n try:\n request = urllib.request.urlopen(req, timeout=self.timeout)\n except (urllib.error.HTTPError, CertificateError) as e:\n logger.error(\"Unavailable url (%s): %s\", self.url, e)\n except (socket.timeout, urllib.error.URLError) as e:\n logger.error(\"Unable to connect to server (%s): %s\", self.url, e)\n else:\n try:\n total_size = request.info().get('Content-Length').strip()\n total_size = int(total_size)\n except AttributeError:\n total_size = 0\n\n chunks = []\n while 1:\n if self.stop_request and self.stop_request.is_set():\n self.content = ''\n return self\n try:\n chunk = request.read(self.buffer_size)\n except socket.timeout as e:\n logger.error(\"Request timed out\")\n self.content = ''\n return self\n self.downloaded_size += len(chunk)\n if self.thread_queue:\n self.thread_queue.put(\n (chunk, self.downloaded_size, total_size)\n )\n else:\n chunks.append(chunk)\n if not chunk:\n break\n request.close()\n self.content = b''.join(chunks)\n return self\n\n def post(self, data):\n raise NotImplementedError\n\n def write_to_file(self, path):\n content = self.content\n if content:\n with open(path, 'wb') as dest_file:\n dest_file.write(content)\n\n @property\n def json(self):\n if self.content:\n return json.loads(self.text)\n\n @property\n def text(self):\n if self.content:\n return self.content.decode()\n", "path": "lutris/util/http.py"}]}
1,056
173
gh_patches_debug_6933
rasdani/github-patches
git_diff
Flexget__Flexget-3204
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- python 3.10 issue I have an issue with python 3.10 and Flexget. Greenlet has been updated to 1.1.2 because the 1.0.0 version is not compatible with python 3.10. After that Flexget was installed successfully but I got the error message below. - FlexGet version: 3.1.137 - Python version: 3.10 - Installation method: pip - Using daemon (yes/no): no - OS and version: Linux / Slackware / 5.14.8 kernel Traceback (most recent call last): File "/usr/bin/flexget", line 5, in <module> from flexget import main File "/usr/lib/python3.10/site-packages/flexget/__init__.py", line 11, in <module> from flexget.manager import Manager # noqa File "/usr/lib/python3.10/site-packages/flexget/manager.py", line 47, in <module> from flexget.ipc import IPCClient, IPCServer # noqa File "/usr/lib/python3.10/site-packages/flexget/ipc.py", line 14, in <module> from flexget import terminal File "/usr/lib/python3.10/site-packages/flexget/terminal.py", line 7, in <module> from colorclass import Color, Windows File "/usr/lib/python3.10/site-packages/colorclass/__init__.py", line 11, in <module> from colorclass.codes import list_tags # noqa File "/usr/lib/python3.10/site-packages/colorclass/codes.py", line 4, in <module> from collections import Mapping ImportError: cannot import name 'Mapping' from 'collections' (/usr/lib/python3.10/collections/__init__.py) Thanks! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 import sys 2 from pathlib import Path 3 from typing import List 4 5 from setuptools import find_packages, setup 6 7 long_description = Path('README.rst').read_text() 8 9 # Populates __version__ without importing the package 10 __version__ = None 11 with open('flexget/_version.py', encoding='utf-8') as ver_file: 12 exec(ver_file.read()) # pylint: disable=W0122 13 if not __version__: 14 print('Could not find __version__ from flexget/_version.py') 15 sys.exit(1) 16 17 18 def load_requirements(filename: str) -> List[str]: 19 return [ 20 line.strip() 21 for line in Path(filename).read_text().splitlines() 22 if not line.startswith('#') 23 ] 24 25 26 setup( 27 name='FlexGet', 28 version=__version__, 29 description='FlexGet is a program aimed to automate downloading or processing content (torrents, podcasts, etc.) 
' 30 'from different sources like RSS-feeds, html-pages, various sites and more.', 31 long_description=long_description, 32 long_description_content_type='text/x-rst', 33 author='Marko Koivusalo', 34 author_email='marko.koivusalo@gmail.com', 35 license='MIT', 36 url='https://flexget.com', 37 project_urls={ 38 'Repository': 'https://github.com/Flexget/Flexget', 39 'Issue Tracker': 'https://github.com/Flexget/Flexget/issues', 40 'Forum': 'https://discuss.flexget.com', 41 }, 42 packages=find_packages(exclude=['flexget.tests']), 43 include_package_data=True, 44 zip_safe=False, 45 install_requires=load_requirements('requirements.txt'), 46 tests_require=['pytest'], 47 extras_require={'dev': load_requirements('dev-requirements.txt')}, 48 entry_points={ 49 'console_scripts': ['flexget = flexget:main'], 50 'gui_scripts': [ 51 'flexget-headless = flexget:main' 52 ], # This is useful on Windows to avoid a cmd popup 53 }, 54 python_requires='>=3.6', 55 classifiers=[ 56 "Development Status :: 5 - Production/Stable", 57 "License :: OSI Approved :: MIT License", 58 "Operating System :: OS Independent", 59 "Programming Language :: Python", 60 "Programming Language :: Python :: 3.6", 61 "Programming Language :: Python :: 3.7", 62 "Programming Language :: Python :: 3.8", 63 "Programming Language :: Python :: 3.9", 64 "Programming Language :: Python :: Implementation :: CPython", 65 "Programming Language :: Python :: Implementation :: PyPy", 66 ], 67 ) 68 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -61,6 +61,7 @@ "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", ],
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,6 +61,7 @@\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n+ \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n", "issue": "python 3.10 issue\nI have an issue with python 3.10 and Flexget. Greenlet has been updated to 1.1.2 because the 1.0.0 version is not compatible with python 3.10. After that Flexget was installed successfully but I got the error message below.\r\n\r\n- FlexGet version: 3.1.137\r\n- Python version: 3.10\r\n- Installation method: pip\r\n- Using daemon (yes/no): no\r\n- OS and version: Linux / Slackware / 5.14.8 kernel\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/bin/flexget\", line 5, in <module>\r\n from flexget import main\r\n File \"/usr/lib/python3.10/site-packages/flexget/__init__.py\", line 11, in <module>\r\n from flexget.manager import Manager # noqa\r\n File \"/usr/lib/python3.10/site-packages/flexget/manager.py\", line 47, in <module>\r\n from flexget.ipc import IPCClient, IPCServer # noqa\r\n File \"/usr/lib/python3.10/site-packages/flexget/ipc.py\", line 14, in <module>\r\n from flexget import terminal\r\n File \"/usr/lib/python3.10/site-packages/flexget/terminal.py\", line 7, in <module>\r\n from colorclass import Color, Windows\r\n File \"/usr/lib/python3.10/site-packages/colorclass/__init__.py\", line 11, in <module>\r\n from colorclass.codes import list_tags # noqa\r\n File \"/usr/lib/python3.10/site-packages/colorclass/codes.py\", line 4, in <module>\r\n from collections import Mapping\r\nImportError: cannot import name 'Mapping' from 'collections' (/usr/lib/python3.10/collections/__init__.py)\r\n\r\nThanks!\n", "before_files": [{"content": "import sys\nfrom pathlib import Path\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\nlong_description = Path('README.rst').read_text()\n\n# Populates __version__ without importing the package\n__version__ = None\nwith open('flexget/_version.py', encoding='utf-8') as ver_file:\n exec(ver_file.read()) # pylint: disable=W0122\nif not __version__:\n print('Could not find __version__ from flexget/_version.py')\n sys.exit(1)\n\n\ndef load_requirements(filename: str) -> List[str]:\n return [\n line.strip()\n for line in Path(filename).read_text().splitlines()\n if not line.startswith('#')\n ]\n\n\nsetup(\n name='FlexGet',\n version=__version__,\n description='FlexGet is a program aimed to automate downloading or processing content (torrents, podcasts, etc.) 
'\n 'from different sources like RSS-feeds, html-pages, various sites and more.',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n author='Marko Koivusalo',\n author_email='marko.koivusalo@gmail.com',\n license='MIT',\n url='https://flexget.com',\n project_urls={\n 'Repository': 'https://github.com/Flexget/Flexget',\n 'Issue Tracker': 'https://github.com/Flexget/Flexget/issues',\n 'Forum': 'https://discuss.flexget.com',\n },\n packages=find_packages(exclude=['flexget.tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=load_requirements('requirements.txt'),\n tests_require=['pytest'],\n extras_require={'dev': load_requirements('dev-requirements.txt')},\n entry_points={\n 'console_scripts': ['flexget = flexget:main'],\n 'gui_scripts': [\n 'flexget-headless = flexget:main'\n ], # This is useful on Windows to avoid a cmd popup\n },\n python_requires='>=3.6',\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "import sys\nfrom pathlib import Path\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\nlong_description = Path('README.rst').read_text()\n\n# Populates __version__ without importing the package\n__version__ = None\nwith open('flexget/_version.py', encoding='utf-8') as ver_file:\n exec(ver_file.read()) # pylint: disable=W0122\nif not __version__:\n print('Could not find __version__ from flexget/_version.py')\n sys.exit(1)\n\n\ndef load_requirements(filename: str) -> List[str]:\n return [\n line.strip()\n for line in Path(filename).read_text().splitlines()\n if not line.startswith('#')\n ]\n\n\nsetup(\n name='FlexGet',\n version=__version__,\n description='FlexGet is a program aimed to automate downloading or processing content (torrents, podcasts, etc.) 
'\n 'from different sources like RSS-feeds, html-pages, various sites and more.',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n author='Marko Koivusalo',\n author_email='marko.koivusalo@gmail.com',\n license='MIT',\n url='https://flexget.com',\n project_urls={\n 'Repository': 'https://github.com/Flexget/Flexget',\n 'Issue Tracker': 'https://github.com/Flexget/Flexget/issues',\n 'Forum': 'https://discuss.flexget.com',\n },\n packages=find_packages(exclude=['flexget.tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=load_requirements('requirements.txt'),\n tests_require=['pytest'],\n extras_require={'dev': load_requirements('dev-requirements.txt')},\n entry_points={\n 'console_scripts': ['flexget = flexget:main'],\n 'gui_scripts': [\n 'flexget-headless = flexget:main'\n ], # This is useful on Windows to avoid a cmd popup\n },\n python_requires='>=3.6',\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n)\n", "path": "setup.py"}]}
1,355
108